diff --git a/docs/reference/services/app-orchestration/amazon-ecs-cluster.md b/docs/reference/services/app-orchestration/amazon-ecs-cluster.md
index 4e3afb3885..094ea60c21 100644
--- a/docs/reference/services/app-orchestration/amazon-ecs-cluster.md
+++ b/docs/reference/services/app-orchestration/amazon-ecs-cluster.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ECS Cluster
-View Source
+View Source
Release Notes
@@ -108,9 +108,9 @@ For info on finding your Docker container logs and custom metrics in CloudWatch,
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -118,7 +118,7 @@ For info on finding your Docker container logs and custom metrics in CloudWatch,
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -126,7 +126,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -153,22 +153,22 @@ For information on how to manage your ECS cluster, see the documentation in the
module "ecs_cluster" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-cluster?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-cluster?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The AMI to run on each instance in the ECS cluster. You can build the AMI using
- # the Packer template ecs-node-al2.json. One of var.cluster_instance_ami or
- # var.cluster_instance_ami_filters is required.
+ # The AMI to run on each instance in the ECS cluster. You can build the AMI
+ # using the Packer template ecs-node-al2.json. One of var.cluster_instance_ami
+ # or var.cluster_instance_ami_filters is required.
cluster_instance_ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with ECS
- # workers. You can build the AMI using the Packer template ecs-node-al2.json. Only
- # used if var.cluster_instance_ami is null. One of var.cluster_instance_ami or
- # var.cluster_instance_ami_filters is required. Set to null if
- # cluster_instance_ami is set.
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # ECS workers. You can build the AMI using the Packer template
+ # ecs-node-al2.json. Only used if var.cluster_instance_ami is null. One of
+ # var.cluster_instance_ami or var.cluster_instance_ami_filters is required.
+ # Set to null if cluster_instance_ami is set.
cluster_instance_ami_filters =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with ECS
- # workers. You can build the AMI using the Packer template ecs-node-al2.json. Only
- # used if var.cluster_instance_ami is null. One of var.cluster_instance_ami or
- # var.cluster_instance_ami_filters is required. Set to null if
- # cluster_instance_ami is set.
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # ECS workers. You can build the AMI using the Packer template
+ # ecs-node-al2.json. Only used if var.cluster_instance_ami is null. One of
+ # var.cluster_instance_ami or var.cluster_instance_ami_filters is required.
+ # Set to null if cluster_instance_ami is set.
cluster_instance_ami_filters =
diff --git a/docs/reference/services/app-orchestration/amazon-ecs-fargate-cluster.md b/docs/reference/services/app-orchestration/amazon-ecs-fargate-cluster.md
index 202d5da63e..d2e772caa0 100644
--- a/docs/reference/services/app-orchestration/amazon-ecs-fargate-cluster.md
+++ b/docs/reference/services/app-orchestration/amazon-ecs-fargate-cluster.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ECS Fargate Cluster
-View Source
+View Source
Release Notes
@@ -64,9 +64,9 @@ To understand core concepts like what is ECS, and the different cluster types, s
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -74,7 +74,7 @@ To understand core concepts like what is ECS, and the different cluster types, s
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -82,7 +82,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -106,7 +106,7 @@ For information on how to manage your ECS cluster, see the documentation in the
module "ecs_fargate_cluster" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-fargate-cluster?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-fargate-cluster?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -119,8 +119,8 @@ module "ecs_fargate_cluster" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A map of custom tags to apply to the ECS Cluster. The key is the tag name and
- # the value is the tag value.
+ # A map of custom tags to apply to the ECS Cluster. The key is the tag name
+ # and the value is the tag value.
custom_tags = {}
# Whether or not to enable container insights monitoring on the ECS cluster.
@@ -141,7 +141,7 @@ module "ecs_fargate_cluster" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-fargate-cluster?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-fargate-cluster?ref=v0.104.12"
}
inputs = {
@@ -157,8 +157,8 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A map of custom tags to apply to the ECS Cluster. The key is the tag name and
- # the value is the tag value.
+ # A map of custom tags to apply to the ECS Cluster. The key is the tag name
+ # and the value is the tag value.
custom_tags = {}
# Whether or not to enable container insights monitoring on the ECS cluster.
@@ -236,11 +236,11 @@ The name of the ECS cluster.
diff --git a/docs/reference/services/app-orchestration/amazon-ecs-service.md b/docs/reference/services/app-orchestration/amazon-ecs-service.md
index d363774279..7d1e874f72 100644
--- a/docs/reference/services/app-orchestration/amazon-ecs-service.md
+++ b/docs/reference/services/app-orchestration/amazon-ecs-service.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ECS Service
-View Source
+View Source
Release Notes
@@ -63,10 +63,10 @@ more, see the documentation in the
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal
submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -74,14 +74,14 @@ more, see the documentation in the
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and testing (but not direct production usage).
### Production deployment
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -105,24 +105,24 @@ For information on how to manage your ECS service, see the documentation in the
module "ecs_service" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-service?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-service?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # List of container definitions to use for the ECS task. Each entry corresponds to
- # a different ECS container definition.
+ # List of container definitions to use for the ECS task. Each entry
+ # corresponds to a different ECS container definition.
container_definitions =
# A map of all the listeners on the load balancer. The keys should be the port
# numbers and the values should be the ARN of the listener for that port.
default_listener_arns =
- # The default port numbers on the load balancer to attach listener rules to. You
- # can override this default on a rule-by-rule basis by setting the listener_ports
- # parameter in each rule. The port numbers specified in this variable and the
- # listener_ports parameter must exist in var.listener_arns.
+ # The default port numbers on the load balancer to attach listener rules to.
+ # You can override this default on a rule-by-rule basis by setting the
+ # listener_ports parameter in each rule. The port numbers specified in this
+ # variable and the listener_ports parameter must exist in var.listener_arns.
default_listener_ports =
# The ARN of the cluster to which the ecs service should be deployed.
@@ -143,16 +143,16 @@ module "ecs_service" {
# A list of SNS topic ARNs to notify when the route53 health check changes to
# ALARM, OK, or INSUFFICIENT_DATA state. Note: these SNS topics MUST be in
- # us-east-1! This is because Route 53 only sends CloudWatch metrics to us-east-1,
- # so we must create the alarm in that region, and therefore, can only notify SNS
- # topics in that region
+ # us-east-1! This is because Route 53 only sends CloudWatch metrics to
+ # us-east-1, so we must create the alarm in that region, and therefore, can
+ # only notify SNS topics in that region
alarm_sns_topic_arns_us_east_1 = []
# The time period, in seconds, during which requests from a client should be
# routed to the same Target. After this time period expires, the load
- # balancer-generated cookie is considered stale. The acceptable range is 1 second
- # to 1 week (604800 seconds). The default value is 1 day (86400 seconds). Only
- # used if var.elb_target_groups is set.
+ # balancer-generated cookie is considered stale. The acceptable range is 1
+ # second to 1 week (604800 seconds). The default value is 1 day (86400
+ # seconds). Only used if var.elb_target_groups is set.
alb_sticky_session_cookie_duration = 86400
# The type of Sticky Sessions to use. See https://goo.gl/MNwqNu for possible
@@ -163,42 +163,42 @@ module "ecs_service" {
# corresponds to a different ECS container definition.
canary_container_definitions = []
- # Which version of the ECS Service Docker container to deploy as a canary (e.g.
- # 0.57)
+ # Which version of the ECS Service Docker container to deploy as a canary
+ # (e.g. 0.57)
canary_version = null
- # The capacity provider strategy to use for the service. Note that the capacity
- # providers have to be present on the ECS cluster before deploying the ECS
- # service. When provided, var.launch_type is ignored.
+ # The capacity provider strategy to use for the service. Note that the
+ # capacity providers have to be present on the ECS cluster before deploying
+ # the ECS service. When provided, var.launch_type is ignored.
capacity_provider_strategy = []
- # The name of the container, as it appears in the var.task_arn Task definition, to
- # associate with a CLB. Currently, ECS can only associate a CLB with a single
- # container per service. Only used if clb_name is set.
+ # The name of the container, as it appears in the var.task_arn Task
+ # definition, to associate with a CLB. Currently, ECS can only associate a CLB
+ # with a single container per service. Only used if clb_name is set.
clb_container_name = null
- # The port on the container in var.clb_container_name to associate with an CLB.
- # Currently, ECS can only associate a CLB with a single container per service.
- # Only used if clb_name is set.
+ # The port on the container in var.clb_container_name to associate with an
+ # CLB. Currently, ECS can only associate a CLB with a single container per
+ # service. Only used if clb_name is set.
clb_container_port = null
# The name of a Classic Load Balancer (CLB) to associate with this service.
- # Containers in the service will automatically register with the CLB when booting
- # up. Set to null if using ELBv2.
+ # Containers in the service will automatically register with the CLB when
+ # booting up. Set to null if using ELBv2.
clb_name = null
- # The ARN of a KMS CMK to use for encrypting log events in the CloudWatch Logs.
- # Set to null to disable encryption. Only used if var.create_cloudwatch_log_group
- # is true.
+ # The ARN of a KMS CMK to use for encrypting log events in the CloudWatch
+ # Logs. Set to null to disable encryption. Only used if
+ # var.create_cloudwatch_log_group is true.
cloudwatch_log_group_kms_key_id = null
- # The name for the Cloudwatch logs that will be generated by the ecs service. Only
- # used (and required) if var.create_cloudwatch_log_group is true.
+ # The name for the Cloudwatch logs that will be generated by the ecs service.
+ # Only used (and required) if var.create_cloudwatch_log_group is true.
cloudwatch_log_group_name = null
- # Number of days to retain log events. Possible values are: 1, 3, 5, 7, 14, 30,
- # 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. Select 0 to never
- # expire. Only used if var.create_cloudwatch_log_group is true.
+ # Number of days to retain log events. Possible values are: 1, 3, 5, 7, 14,
+ # 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. Select 0
+ # to never expire. Only used if var.create_cloudwatch_log_group is true.
cloudwatch_log_group_retention = null
# A map of tags to apply to the Cloudwatch log group. Each item in this list
@@ -209,27 +209,27 @@ module "ecs_service" {
# The number of CPU units to allocate to the ECS Service.
cpu = 1
- # When true, create and manage the CloudWatch Log Group in the Terraform module
- # instead of relying on ECS. This is useful for configuring options that are not
- # available in the ECS native feature of managing the Log Group (e.g., encryption
- # support).
+ # When true, create and manage the CloudWatch Log Group in the Terraform
+ # module instead of relying on ECS. This is useful for configuring options
+ # that are not available in the ECS native feature of managing the Log Group
+ # (e.g., encryption support).
create_cloudwatch_log_group = false
- # Set to true if you want a DNS record automatically created and pointed at the
- # the load balancer for the ECS service
+ # Set to true if you want a DNS record automatically created and pointed at
+ # the load balancer for the ECS service
create_route53_entry = false
- # If var.use_custom_docker_run_command is set to true, set this variable to the
- # custom docker run command you want to provide
+ # If var.use_custom_docker_run_command is set to true, set this variable to
+ # the custom docker run command you want to provide
custom_docker_command = null
- # The name to use for the ECS Service IAM role, which is used to grant permissions
- # to the ECS service to register the task IPs to ELBs.
+ # The name to use for the ECS Service IAM role, which is used to grant
+ # permissions to the ECS service to register the task IPs to ELBs.
custom_ecs_service_role_name = null
# Prefix for name of the custom IAM policies created by this module (those
- # resulting from var.iam_policy and var.secrets_access). If omitted, defaults to
- # var.service_name.
+ # resulting from var.iam_policy and var.secrets_access). If omitted, defaults
+ # to var.service_name.
custom_iam_policy_prefix = null
# Prefix for name of the IAM role used by the ECS task.
@@ -241,10 +241,10 @@ module "ecs_service" {
# Create a dependency between the resources in this module to the interpolated
# values in this list (and thus the source resources). In other words, the
- # resources in this module will now depend on the resources backing the values in
- # this list such that those resources need to be created before the resources in
- # this module, and the resources in this module need to be destroyed before the
- # resources in the list.
+ # resources in this module will now depend on the resources backing the values
+ # in this list such that those resources need to be created before the
+ # resources in this module, and the resources in this module need to be
+ # destroyed before the resources in the list.
dependencies = []
# Set the logging level of the deployment check script. You can set this to
@@ -255,35 +255,37 @@ module "ecs_service" {
# deployment. See ecs_deploy_check_binaries for more details.
deployment_check_timeout_seconds = 600
- # Set to 'true' to prevent the task from attempting to continuously redeploy after
- # a failed health check.
+ # Set to 'true' to prevent the task from attempting to continuously redeploy
+ # after a failed health check.
deployment_circuit_breaker_enabled = false
- # Set to 'true' to also automatically roll back to the last successful deployment.
- # deploy_circuit_breaker_enabled must also be true to enable this behavior.
+ # Set to 'true' to also automatically roll back to the last successful
+ # deployment. deployment_circuit_breaker_enabled must also be true to enable
+ # this behavior.
deployment_circuit_breaker_rollback = false
- # The upper limit, as a percentage of var.desired_number_of_tasks, of the number
- # of running tasks that can be running in a service during a deployment. Setting
- # this to more than 100 means that during deployment, ECS will deploy new
- # instances of a Task before undeploying the old ones.
+ # The upper limit, as a percentage of var.desired_number_of_tasks, of the
+ # number of running tasks that can be running in a service during a
+ # deployment. Setting this to more than 100 means that during deployment, ECS
+ # will deploy new instances of a Task before undeploying the old ones.
deployment_maximum_percent = 200
- # The lower limit, as a percentage of var.desired_number_of_tasks, of the number
- # of running tasks that must remain running and healthy in a service during a
- # deployment. Setting this to less than 100 means that during deployment, ECS may
- # undeploy old instances of a Task before deploying new ones.
+ # The lower limit, as a percentage of var.desired_number_of_tasks, of the
+ # number of running tasks that must remain running and healthy in a service
+ # during a deployment. Setting this to less than 100 means that during
+ # deployment, ECS may undeploy old instances of a Task before deploying new
+ # ones.
deployment_minimum_healthy_percent = 100
- # How many instances of the ECS Service to run across the ECS cluster for a canary
- # deployment. Typically, only 0 or 1 should be used.
+ # How many instances of the ECS Service to run across the ECS cluster for a
+ # canary deployment. Typically, only 0 or 1 should be used.
desired_number_of_canary_tasks = 0
# How many instances of the ECS Service to run across the ECS cluster
desired_number_of_tasks = 1
- # The domain name to create a route 53 record for. This DNS record will point to
- # the load balancer for the ECS service
+ # The domain name to create a route 53 record for. This DNS record will point
+ # to the load balancer for the ECS service
domain_name = null
# The ID of the security group that should be applied to ecs service instances
@@ -294,41 +296,41 @@ module "ecs_service" {
# container port and the value should be what host port to map it to.
ecs_node_port_mappings = {}
- # (Optional) A map of EFS volumes that containers in your task may use. Each item
- # in the list should be a map compatible with
- # https://www.terraform.io/docs/providers/aws/r/ecs_task_definition.html#efs-volum
- # -configuration-arguments.
+ # (Optional) A map of EFS volumes that containers in your task may use. Each
+ # item in the list should be a map compatible with
+ # https://www.terraform.io/docs/providers/aws/r/ecs_task_definition.html#efs-volume-configuration-arguments.
efs_volumes = {}
# The amount time for targets to warm up before the load balancer sends them a
- # full share of requests. The range is 30-900 seconds or 0 to disable. The default
- # value is 0 seconds. Only used if var.elb_target_groups is set.
+ # full share of requests. The range is 30-900 seconds or 0 to disable. The
+ # default value is 0 seconds. Only used if var.elb_target_groups is set.
elb_slow_start = 0
- # The amount of time for Elastic Load Balancing to wait before changing the state
- # of a deregistering target from draining to unused. The range is 0-3600 seconds.
- # Only used if var.elb_target_groups is set.
+ # The amount of time for Elastic Load Balancing to wait before changing the
+ # state of a deregistering target from draining to unused. The range is 0-3600
+ # seconds. Only used if var.elb_target_groups is set.
elb_target_group_deregistration_delay = 300
# The ID of the VPC in which to create the target group. Only used if
# var.elb_target_groups is set.
elb_target_group_vpc_id = null
- # Configurations for ELB target groups for ALBs and NLBs that should be associated
- # with the ECS Tasks. Each entry corresponds to a separate target group. Set to
- # the empty object ({}) if you are not using an ALB or NLB.
+ # Configurations for ELB target groups for ALBs and NLBs that should be
+ # associated with the ECS Tasks. Each entry corresponds to a separate target
+ # group. Set to the empty object ({}) if you are not using an ALB or NLB.
elb_target_groups = {}
# Set to true to enable Cloudwatch alarms on the ecs service instances
enable_cloudwatch_alarms = false
- # Whether or not to enable the ECS deployment check binary to make terraform wait
- # for the task to be deployed. See ecs_deploy_check_binaries for more details. You
- # must install the companion binary before the check can be used. Refer to the
- # README for more details.
+ # Whether or not to enable the ECS deployment check binary to make terraform
+ # wait for the task to be deployed. See ecs_deploy_check_binaries for more
+ # details. You must install the companion binary before the check can be used.
+ # Refer to the README for more details.
enable_ecs_deployment_check = true
- # Specifies whether to enable Amazon ECS Exec for the tasks within the service.
+ # Specifies whether to enable Amazon ECS Exec for the tasks within the
+ # service.
enable_execute_command = false
# Set this to true to create a route 53 health check and Cloudwatch alarm that
@@ -342,8 +344,8 @@ module "ecs_service" {
forward_rules = {}
- # If true, enable health checks on the target group. Only applies to ELBv2. For
- # CLBs, health checks are not configurable.
+ # If true, enable health checks on the target group. Only applies to ELBv2.
+ # For CLBs, health checks are not configurable.
health_check_enabled = true
# Seconds to ignore failing load balancer health checks on newly instantiated
@@ -351,83 +353,84 @@ module "ecs_service" {
# services configured to use load balancers.
health_check_grace_period_seconds = 0
- # The number of consecutive successful health checks required before considering
- # an unhealthy Target healthy. The acceptable range is 2 to 10.
+ # The number of consecutive successful health checks required before
+ # considering an unhealthy Target healthy. The acceptable range is 2 to 10.
health_check_healthy_threshold = 5
# The approximate amount of time, in seconds, between health checks of an
# individual Target. Minimum value 5 seconds, Maximum value 300 seconds.
health_check_interval = 30
- # The HTTP codes to use when checking for a successful response from a Target. You
- # can specify multiple values (e.g. '200,202') or a range of values (e.g.
+ # The HTTP codes to use when checking for a successful response from a Target.
+ # You can specify multiple values (e.g. '200,202') or a range of values (e.g.
# '200-299'). Required when using ALBs.
health_check_matcher = "200"
- # The ping path that is the destination on the Targets for health checks. Required
- # when using ALBs.
+ # The ping path that is the destination on the Targets for health checks.
+ # Required when using ALBs.
health_check_path = "/"
- # The port the ELB uses when performing health checks on Targets. The default is
- # to use the port on which each target receives traffic from the load balancer,
- # indicated by the value 'traffic-port'.
+ # The port the ELB uses when performing health checks on Targets. The default
+ # is to use the port on which each target receives traffic from the load
+ # balancer, indicated by the value 'traffic-port'.
health_check_port = "traffic-port"
- # The amount of time, in seconds, during which no response from a Target means a
- # failed health check. The acceptable range is 2 to 60 seconds.
+ # The amount of time, in seconds, during which no response from a Target means
+ # a failed health check. The acceptable range is 2 to 60 seconds.
health_check_timeout = 5
# The number of consecutive failed health checks required before considering a
- # target unhealthy. The acceptable range is 2 to 10. For NLBs, this value must be
- # the same as the health_check_healthy_threshold.
+ # target unhealthy. The acceptable range is 2 to 10. For NLBs, this value must
+ # be the same as the health_check_healthy_threshold.
health_check_unhealthy_threshold = 2
# The period, in seconds, over which to measure the CPU utilization percentage
high_cpu_utilization_period = 300
- # Trigger an alarm if the ECS Service has a CPU utilization percentage above this
- # threshold
+ # Trigger an alarm if the ECS Service has a CPU utilization percentage above
+ # this threshold
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must be
- # one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must
+ # be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_cpu_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the memory utilization percentage
+ # The period, in seconds, over which to measure the memory utilization
+ # percentage
high_memory_utilization_period = 300
- # Trigger an alarm if the ECS Service has a memory utilization percentage above
- # this threshold
+ # Trigger an alarm if the ECS Service has a memory utilization percentage
+ # above this threshold
high_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must be
- # one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must
+ # be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_memory_utilization_treat_missing_data = "missing"
- # The ID of the Route 53 hosted zone into which the Route 53 DNS record should be
- # written
+ # The ID of the Route 53 hosted zone into which the Route 53 DNS record should
+ # be written
hosted_zone_id = null
# An object defining the policy to attach to the ECS task. Accepts a map of
- # objects, where the map keys are sids for IAM policy statements, and the object
- # fields are the resources, actions, and the effect ("Allow" or "Deny") of the
- # statement.
+ # objects, where the map keys are sids for IAM policy statements, and the
+ # object fields are the resources, actions, and the effect ("Allow" or "Deny")
+ # of the statement.
iam_policy = null
- # The launch type of the ECS service. Must be one of EC2 or FARGATE. When using
- # FARGATE, you must set the network mode to awsvpc and configure it. When using
- # EC2, you can configure the placement strategy using the variables
+ # The launch type of the ECS service. Must be one of EC2 or FARGATE. When
+ # using FARGATE, you must set the network mode to awsvpc and configure it.
+ # When using EC2, you can configure the placement strategy using the variables
# var.placement_strategy_type, var.placement_strategy_field,
# var.placement_constraint_type, var.placement_constraint_expression. This
# variable is ignored if var.capacity_provider_strategy is provided.
launch_type = "EC2"
- # The ID of the Route 53 Hosted Zone in which to create a DNS A record pointed to
- # the ECS service's load balancer
+ # The ID of the Route 53 Hosted Zone in which to create a DNS A record pointed
+ # to the ECS service's load balancer
lb_hosted_zone_id = null
- # A map of tags to apply to the elb target group. Each item in this list should be
- # a map with the parameters key and value.
+ # A map of tags to apply to the elb target group. Each item in this list
+ # should be a map with the parameters key and value.
lb_target_group_tags = {}
# The maximum number of instances of the ECS Service to run. Auto scaling will
@@ -441,45 +444,45 @@ module "ecs_service" {
# never scale in below this number.
min_number_of_tasks = 1
- # The configuration to use when setting up the VPC network mode. Required and only
- # used if network_mode is awsvpc.
+ # The configuration to use when setting up the VPC network mode. Required and
+ # only used if network_mode is awsvpc.
network_configuration = null
# The Docker networking mode to use for the containers in the task. The valid
- # values are none, bridge, awsvpc, and host. If the network_mode is set to awsvpc,
- # you must configure var.network_configuration.
+ # values are none, bridge, awsvpc, and host. If the network_mode is set to
+ # awsvpc, you must configure var.network_configuration.
network_mode = "bridge"
# The DNS name that was assigned by AWS to the load balancer upon creation
original_lb_dns_name = null
- # Cluster Query Language expression to apply to the constraint for matching. Does
- # not need to be specified for the distinctInstance constraint type.
+ # Cluster Query Language expression to apply to the constraint for matching.
+ # Does not need to be specified for the distinctInstance constraint type.
placement_constraint_expression = "attribute:ecs.ami-id != 'ami-fake'"
- # The type of constraint to apply for container instance placement. The only valid
- # values at this time are memberOf and distinctInstance.
+ # The type of constraint to apply for container instance placement. The only
+ # valid values at this time are memberOf and distinctInstance.
placement_constraint_type = "memberOf"
# The field to apply the placement strategy against. For the spread placement
- # strategy, valid values are instanceId (or host, which has the same effect), or
- # any platform or custom attribute that is applied to a container instance, such
- # as attribute:ecs.availability-zone. For the binpack placement strategy, valid
- # values are cpu and memory. For the random placement strategy, this field is not
- # used.
+ # strategy, valid values are instanceId (or host, which has the same effect),
+ # or any platform or custom attribute that is applied to a container instance,
+ # such as attribute:ecs.availability-zone. For the binpack placement strategy,
+ # valid values are cpu and memory. For the random placement strategy, this
+ # field is not used.
placement_strategy_field = "cpu"
# The strategy to use when placing ECS tasks on EC2 instances. Can be binpack
# (default), random, or spread.
placement_strategy_type = "binpack"
- # Whether tags should be propogated to the tasks from the service or from the task
- # definition. Valid values are SERVICE and TASK_DEFINITION. Defaults to SERVICE.
- # If set to null, no tags are created for tasks.
+ # Whether tags should be propagated to the tasks from the service or from the
+ # task definition. Valid values are SERVICE and TASK_DEFINITION. Defaults to
+ # SERVICE. If set to null, no tags are created for tasks.
propagate_tags = "SERVICE"
- # Use the name of the Envoy proxy container from `container_definitions` as the
- # container name.
+ # Use the name of the Envoy proxy container from `container_definitions` as
+ # the container name.
proxy_configuration_container_name = null
# A map of network configuration parameters to provide the Container Network
@@ -488,9 +491,9 @@ module "ecs_service" {
redirect_rules = {}
- # The path, without any leading slash, that can be used as a health check (e.g.
- # healthcheck) by Route 53. Should return a 200 OK when the service is up and
- # running.
+ # The path, without any leading slash, that can be used as a health check
+ # (e.g. healthcheck) by Route 53. Should return a 200 OK when the service is
+ # up and running.
route53_health_check_path = "/"
# The port to use for Route 53 health checks. This should be the port for the
@@ -498,51 +501,56 @@ module "ecs_service" {
# (var.domain_name).
route53_health_check_port = 80
- # The protocol to use for Route 53 health checks. Should be one of HTTP, HTTPS.
+ # The protocol to use for Route 53 health checks. Should be one of HTTP,
+ # HTTPS.
route53_health_check_protocol = "HTTP"
- # The optional external_id to be used in the us-east-1 provider block defined in
- # the route53-health-check-alarms module. This module configures its own AWS
- # provider to ensure resources are created in us-east-1.
+ # The optional external_id to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_external_id = null
- # The optional AWS profile to be used in the us-east-1 provider block defined in
- # the route53-health-check-alarms module. This module configures its own AWS
- # provider to ensure resources are created in us-east-1.
+ # The optional AWS profile to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_profile = null
- # The optional role_arn to be used in the us-east-1 provider block defined in the
- # route53-health-check-alarms module. This module configures its own AWS provider
- # to ensure resources are created in us-east-1.
- route53_health_check_provider_role_arn = null
-
- # The optional session_name to be used in the us-east-1 provider block defined in
+ # The optional role_arn to be used in the us-east-1 provider block defined in
# the route53-health-check-alarms module. This module configures its own AWS
# provider to ensure resources are created in us-east-1.
+ route53_health_check_provider_role_arn = null
+
+ # The optional session_name to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_session_name = null
# The optional path to a credentials file used in the us-east-1 provider block
- # defined in the route53-health-check-alarms module. This module configures its
- # own AWS provider to ensure resources are created in us-east-1.
+ # defined in the route53-health-check-alarms module. This module configures
+ # its own AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_shared_credentials_file = null
- # A list of ARNs of Secrets Manager secrets that the task should have permissions
- # to read. The IAM role for the task will be granted
+ # Define runtime platform options
+ runtime_platform = null
+
+ # A list of ARNs of Secrets Manager secrets that the task should have
+ # permissions to read. The IAM role for the task will be granted
# `secretsmanager:GetSecretValue` for each secret in the list. The ARN can be
# either the complete ARN, including the randomly generated suffix, or the ARN
# without the suffix. If the latter, the module will look up the full ARN
- # automatically. This is helpful in cases where you don't yet know the randomly
- # generated suffix because the rest of the ARN is a predictable value.
+ # automatically. This is helpful in cases where you don't yet know the
+ # randomly generated suffix because the rest of the ARN is a predictable
+ # value.
secrets_access = []
# A list of ARNs for Secrets Manager secrets that the ECS execution IAM policy
- # should be granted access to read. Note that this is different from the ECS task
- # IAM policy. The execution policy is concerned with permissions required to run
- # the ECS task. The ARN can be either the complete ARN, including the randomly
- # generated suffix, or the ARN without the suffix. If the latter, the module will
- # look up the full ARN automatically. This is helpful in cases where you don't yet
- # know the randomly generated suffix because the rest of the ARN is a predictable
- # value.
+ # should be granted access to read. Note that this is different from the ECS
+ # task IAM policy. The execution policy is concerned with permissions required
+ # to run the ECS task. The ARN can be either the complete ARN, including the
+ # randomly generated suffix, or the ARN without the suffix. If the latter, the
+ # module will look up the full ARN automatically. This is helpful in cases
+ # where you don't yet know the randomly generated suffix because the rest of
+ # the ARN is a predictable value.
secrets_manager_arns = []
# The ARN of the kms key associated with secrets manager
@@ -550,32 +558,32 @@ module "ecs_service" {
# The name of the aws_security_group that gets created if var.network_mode is
# awsvpc and custom rules are specified for the ECS Fargate worker via
- # var.network_configuration.security_group_rules. Defaults to var.service_name if
- # not specified.
+ # var.network_configuration.security_group_rules. Defaults to var.service_name
+ # if not specified.
service_security_group_name = null
- # A map of tags to apply to the ECS service. Each item in this list should be a
- # map with the parameters key and value.
+ # A map of tags to apply to the ECS service. Each item in this list should be
+ # a map with the parameters key and value.
service_tags = {}
# The CPU units for the instances that Fargate will spin up. Options here:
- # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html#far
- # ate-tasks-size. Required when using FARGATE launch type.
+ # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html#fargate-tasks-size.
+ # Required when using FARGATE launch type.
task_cpu = null
- # A map of tags to apply to the task definition. Each item in this list should be
- # a map with the parameters key and value.
+ # A map of tags to apply to the task definition. Each item in this list should
+ # be a map with the parameters key and value.
task_definition_tags = {}
# The memory units for the instances that Fargate will spin up. Options here:
- # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html#far
- # ate-tasks-size. Required when using FARGATE launch type.
+ # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html#fargate-tasks-size.
+ # Required when using FARGATE launch type.
task_memory = null
# If true, the ALB will use use Sticky Sessions as described at
- # https://goo.gl/VLcNbk. Only used if var.elb_target_groups is set. Note that this
- # can only be true when associating with an ALB. This cannot be used with CLBs or
- # NLBs.
+ # https://goo.gl/VLcNbk. Only used if var.elb_target_groups is set. Note that
+ # this can only be true when associating with an ALB. This cannot be used with
+ # CLBs or NLBs.
use_alb_sticky_sessions = false
# Whether or not to enable auto scaling for the ecs service
@@ -585,10 +593,11 @@ module "ecs_service" {
# this to true, you must supply var.custom_docker_command
use_custom_docker_run_command = false
- # (Optional) A map of volume blocks that containers in your task may use. The key
- # should be the name of the volume and the value should be a map compatible with
- # https://www.terraform.io/docs/providers/aws/r/ecs_task_definition.html#volume-bl
- # ck-arguments, but not including the name parameter.
+ # (Optional) A map of volume blocks that containers in your task may use. The
+ # key should be the name of the volume and the value should be a map
+ # compatible with
+ # https://www.terraform.io/docs/providers/aws/r/ecs_task_definition.html#volume-block-arguments,
+ # but not including the name parameter.
volumes = {}
}
@@ -606,7 +615,7 @@ module "ecs_service" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-service?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-service?ref=v0.104.12"
}
inputs = {
@@ -615,18 +624,18 @@ inputs = {
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # List of container definitions to use for the ECS task. Each entry corresponds to
- # a different ECS container definition.
+ # List of container definitions to use for the ECS task. Each entry
+ # corresponds to a different ECS container definition.
container_definitions =
# A map of all the listeners on the load balancer. The keys should be the port
# numbers and the values should be the ARN of the listener for that port.
default_listener_arns =
- # The default port numbers on the load balancer to attach listener rules to. You
- # can override this default on a rule-by-rule basis by setting the listener_ports
- # parameter in each rule. The port numbers specified in this variable and the
- # listener_ports parameter must exist in var.listener_arns.
+ # The default port numbers on the load balancer to attach listener rules to.
+ # You can override this default on a rule-by-rule basis by setting the
+ # listener_ports parameter in each rule. The port numbers specified in this
+ # variable and the listener_ports parameter must exist in var.listener_arns.
default_listener_ports =
# The ARN of the cluster to which the ecs service should be deployed.
@@ -647,16 +656,16 @@ inputs = {
# A list of SNS topic ARNs to notify when the route53 health check changes to
# ALARM, OK, or INSUFFICIENT_DATA state. Note: these SNS topics MUST be in
- # us-east-1! This is because Route 53 only sends CloudWatch metrics to us-east-1,
- # so we must create the alarm in that region, and therefore, can only notify SNS
- # topics in that region
+ # us-east-1! This is because Route 53 only sends CloudWatch metrics to
+ # us-east-1, so we must create the alarm in that region, and therefore, can
+ # only notify SNS topics in that region
alarm_sns_topic_arns_us_east_1 = []
# The time period, in seconds, during which requests from a client should be
# routed to the same Target. After this time period expires, the load
- # balancer-generated cookie is considered stale. The acceptable range is 1 second
- # to 1 week (604800 seconds). The default value is 1 day (86400 seconds). Only
- # used if var.elb_target_groups is set.
+ # balancer-generated cookie is considered stale. The acceptable range is 1
+ # second to 1 week (604800 seconds). The default value is 1 day (86400
+ # seconds). Only used if var.elb_target_groups is set.
alb_sticky_session_cookie_duration = 86400
# The type of Sticky Sessions to use. See https://goo.gl/MNwqNu for possible
@@ -667,42 +676,42 @@ inputs = {
# corresponds to a different ECS container definition.
canary_container_definitions = []
- # Which version of the ECS Service Docker container to deploy as a canary (e.g.
- # 0.57)
+ # Which version of the ECS Service Docker container to deploy as a canary
+ # (e.g. 0.57)
canary_version = null
- # The capacity provider strategy to use for the service. Note that the capacity
- # providers have to be present on the ECS cluster before deploying the ECS
- # service. When provided, var.launch_type is ignored.
+ # The capacity provider strategy to use for the service. Note that the
+ # capacity providers have to be present on the ECS cluster before deploying
+ # the ECS service. When provided, var.launch_type is ignored.
capacity_provider_strategy = []
- # The name of the container, as it appears in the var.task_arn Task definition, to
- # associate with a CLB. Currently, ECS can only associate a CLB with a single
- # container per service. Only used if clb_name is set.
+ # The name of the container, as it appears in the var.task_arn Task
+ # definition, to associate with a CLB. Currently, ECS can only associate a CLB
+ # with a single container per service. Only used if clb_name is set.
clb_container_name = null
- # The port on the container in var.clb_container_name to associate with an CLB.
- # Currently, ECS can only associate a CLB with a single container per service.
- # Only used if clb_name is set.
+ # The port on the container in var.clb_container_name to associate with a
+ # CLB. Currently, ECS can only associate a CLB with a single container per
+ # service. Only used if clb_name is set.
clb_container_port = null
# The name of a Classic Load Balancer (CLB) to associate with this service.
- # Containers in the service will automatically register with the CLB when booting
- # up. Set to null if using ELBv2.
+ # Containers in the service will automatically register with the CLB when
+ # booting up. Set to null if using ELBv2.
clb_name = null
- # The ARN of a KMS CMK to use for encrypting log events in the CloudWatch Logs.
- # Set to null to disable encryption. Only used if var.create_cloudwatch_log_group
- # is true.
+ # The ARN of a KMS CMK to use for encrypting log events in the CloudWatch
+ # Logs. Set to null to disable encryption. Only used if
+ # var.create_cloudwatch_log_group is true.
cloudwatch_log_group_kms_key_id = null
- # The name for the Cloudwatch logs that will be generated by the ecs service. Only
- # used (and required) if var.create_cloudwatch_log_group is true.
+ # The name for the Cloudwatch logs that will be generated by the ecs service.
+ # Only used (and required) if var.create_cloudwatch_log_group is true.
cloudwatch_log_group_name = null
- # Number of days to retain log events. Possible values are: 1, 3, 5, 7, 14, 30,
- # 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. Select 0 to never
- # expire. Only used if var.create_cloudwatch_log_group is true.
+ # Number of days to retain log events. Possible values are: 1, 3, 5, 7, 14,
+ # 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. Select 0
+ # to never expire. Only used if var.create_cloudwatch_log_group is true.
cloudwatch_log_group_retention = null
# A map of tags to apply to the Cloudwatch log group. Each item in this list
@@ -713,27 +722,27 @@ inputs = {
# The number of CPU units to allocate to the ECS Service.
cpu = 1
- # When true, create and manage the CloudWatch Log Group in the Terraform module
- # instead of relying on ECS. This is useful for configuring options that are not
- # available in the ECS native feature of managing the Log Group (e.g., encryption
- # support).
+ # When true, create and manage the CloudWatch Log Group in the Terraform
+ # module instead of relying on ECS. This is useful for configuring options
+ # that are not available in the ECS native feature of managing the Log Group
+ # (e.g., encryption support).
create_cloudwatch_log_group = false
- # Set to true if you want a DNS record automatically created and pointed at the
- # the load balancer for the ECS service
+ # Set to true if you want a DNS record automatically created and pointed at
+ # the load balancer for the ECS service
create_route53_entry = false
- # If var.use_custom_docker_run_command is set to true, set this variable to the
- # custom docker run command you want to provide
+ # If var.use_custom_docker_run_command is set to true, set this variable to
+ # the custom docker run command you want to provide
custom_docker_command = null
- # The name to use for the ECS Service IAM role, which is used to grant permissions
- # to the ECS service to register the task IPs to ELBs.
+ # The name to use for the ECS Service IAM role, which is used to grant
+ # permissions to the ECS service to register the task IPs to ELBs.
custom_ecs_service_role_name = null
# Prefix for name of the custom IAM policies created by this module (those
- # resulting from var.iam_policy and var.secrets_access). If omitted, defaults to
- # var.service_name.
+ # resulting from var.iam_policy and var.secrets_access). If omitted, defaults
+ # to var.service_name.
custom_iam_policy_prefix = null
# Prefix for name of the IAM role used by the ECS task.
@@ -745,10 +754,10 @@ inputs = {
# Create a dependency between the resources in this module to the interpolated
# values in this list (and thus the source resources). In other words, the
- # resources in this module will now depend on the resources backing the values in
- # this list such that those resources need to be created before the resources in
- # this module, and the resources in this module need to be destroyed before the
- # resources in the list.
+ # resources in this module will now depend on the resources backing the values
+ # in this list such that those resources need to be created before the
+ # resources in this module, and the resources in this module need to be
+ # destroyed before the resources in the list.
dependencies = []
# Set the logging level of the deployment check script. You can set this to
@@ -759,35 +768,37 @@ inputs = {
# deployment. See ecs_deploy_check_binaries for more details.
deployment_check_timeout_seconds = 600
- # Set to 'true' to prevent the task from attempting to continuously redeploy after
- # a failed health check.
+ # Set to 'true' to prevent the task from attempting to continuously redeploy
+ # after a failed health check.
deployment_circuit_breaker_enabled = false
- # Set to 'true' to also automatically roll back to the last successful deployment.
- # deploy_circuit_breaker_enabled must also be true to enable this behavior.
+ # Set to 'true' to also automatically roll back to the last successful
+ # deployment. deployment_circuit_breaker_enabled must also be true to enable
+ # behavior.
deployment_circuit_breaker_rollback = false
- # The upper limit, as a percentage of var.desired_number_of_tasks, of the number
- # of running tasks that can be running in a service during a deployment. Setting
- # this to more than 100 means that during deployment, ECS will deploy new
- # instances of a Task before undeploying the old ones.
+ # The upper limit, as a percentage of var.desired_number_of_tasks, of the
+ # number of running tasks that can be running in a service during a
+ # deployment. Setting this to more than 100 means that during deployment, ECS
+ # will deploy new instances of a Task before undeploying the old ones.
deployment_maximum_percent = 200
- # The lower limit, as a percentage of var.desired_number_of_tasks, of the number
- # of running tasks that must remain running and healthy in a service during a
- # deployment. Setting this to less than 100 means that during deployment, ECS may
- # undeploy old instances of a Task before deploying new ones.
+ # The lower limit, as a percentage of var.desired_number_of_tasks, of the
+ # number of running tasks that must remain running and healthy in a service
+ # during a deployment. Setting this to less than 100 means that during
+ # deployment, ECS may undeploy old instances of a Task before deploying new
+ # ones.
deployment_minimum_healthy_percent = 100
- # How many instances of the ECS Service to run across the ECS cluster for a canary
- # deployment. Typically, only 0 or 1 should be used.
+ # How many instances of the ECS Service to run across the ECS cluster for a
+ # canary deployment. Typically, only 0 or 1 should be used.
desired_number_of_canary_tasks = 0
# How many instances of the ECS Service to run across the ECS cluster
desired_number_of_tasks = 1
- # The domain name to create a route 53 record for. This DNS record will point to
- # the load balancer for the ECS service
+ # The domain name to create a route 53 record for. This DNS record will point
+ # to the load balancer for the ECS service
domain_name = null
# The ID of the security group that should be applied to ecs service instances
@@ -798,41 +809,41 @@ inputs = {
# container port and the value should be what host port to map it to.
ecs_node_port_mappings = {}
- # (Optional) A map of EFS volumes that containers in your task may use. Each item
- # in the list should be a map compatible with
- # https://www.terraform.io/docs/providers/aws/r/ecs_task_definition.html#efs-volum
- # -configuration-arguments.
+ # (Optional) A map of EFS volumes that containers in your task may use. Each
+ # item in the list should be a map compatible with
+ # https://www.terraform.io/docs/providers/aws/r/ecs_task_definition.html#efs-volume-configuration-arguments.
efs_volumes = {}
# The amount time for targets to warm up before the load balancer sends them a
- # full share of requests. The range is 30-900 seconds or 0 to disable. The default
- # value is 0 seconds. Only used if var.elb_target_groups is set.
+ # full share of requests. The range is 30-900 seconds or 0 to disable. The
+ # default value is 0 seconds. Only used if var.elb_target_groups is set.
elb_slow_start = 0
- # The amount of time for Elastic Load Balancing to wait before changing the state
- # of a deregistering target from draining to unused. The range is 0-3600 seconds.
- # Only used if var.elb_target_groups is set.
+ # The amount of time for Elastic Load Balancing to wait before changing the
+ # state of a deregistering target from draining to unused. The range is 0-3600
+ # seconds. Only used if var.elb_target_groups is set.
elb_target_group_deregistration_delay = 300
# The ID of the VPC in which to create the target group. Only used if
# var.elb_target_groups is set.
elb_target_group_vpc_id = null
- # Configurations for ELB target groups for ALBs and NLBs that should be associated
- # with the ECS Tasks. Each entry corresponds to a separate target group. Set to
- # the empty object ({}) if you are not using an ALB or NLB.
+ # Configurations for ELB target groups for ALBs and NLBs that should be
+ # associated with the ECS Tasks. Each entry corresponds to a separate target
+ # group. Set to the empty object ({}) if you are not using an ALB or NLB.
elb_target_groups = {}
# Set to true to enable Cloudwatch alarms on the ecs service instances
enable_cloudwatch_alarms = false
- # Whether or not to enable the ECS deployment check binary to make terraform wait
- # for the task to be deployed. See ecs_deploy_check_binaries for more details. You
- # must install the companion binary before the check can be used. Refer to the
- # README for more details.
+ # Whether or not to enable the ECS deployment check binary to make terraform
+ # wait for the task to be deployed. See ecs_deploy_check_binaries for more
+ # details. You must install the companion binary before the check can be used.
+ # Refer to the README for more details.
enable_ecs_deployment_check = true
- # Specifies whether to enable Amazon ECS Exec for the tasks within the service.
+ # Specifies whether to enable Amazon ECS Exec for the tasks within the
+ # service.
enable_execute_command = false
# Set this to true to create a route 53 health check and Cloudwatch alarm that
@@ -846,8 +857,8 @@ inputs = {
forward_rules = {}
- # If true, enable health checks on the target group. Only applies to ELBv2. For
- # CLBs, health checks are not configurable.
+ # If true, enable health checks on the target group. Only applies to ELBv2.
+ # For CLBs, health checks are not configurable.
health_check_enabled = true
# Seconds to ignore failing load balancer health checks on newly instantiated
@@ -855,83 +866,84 @@ inputs = {
# services configured to use load balancers.
health_check_grace_period_seconds = 0
- # The number of consecutive successful health checks required before considering
- # an unhealthy Target healthy. The acceptable range is 2 to 10.
+ # The number of consecutive successful health checks required before
+ # considering an unhealthy Target healthy. The acceptable range is 2 to 10.
health_check_healthy_threshold = 5
# The approximate amount of time, in seconds, between health checks of an
# individual Target. Minimum value 5 seconds, Maximum value 300 seconds.
health_check_interval = 30
- # The HTTP codes to use when checking for a successful response from a Target. You
- # can specify multiple values (e.g. '200,202') or a range of values (e.g.
+ # The HTTP codes to use when checking for a successful response from a Target.
+ # You can specify multiple values (e.g. '200,202') or a range of values (e.g.
# '200-299'). Required when using ALBs.
health_check_matcher = "200"
- # The ping path that is the destination on the Targets for health checks. Required
- # when using ALBs.
+ # The ping path that is the destination on the Targets for health checks.
+ # Required when using ALBs.
health_check_path = "/"
- # The port the ELB uses when performing health checks on Targets. The default is
- # to use the port on which each target receives traffic from the load balancer,
- # indicated by the value 'traffic-port'.
+ # The port the ELB uses when performing health checks on Targets. The default
+ # is to use the port on which each target receives traffic from the load
+ # balancer, indicated by the value 'traffic-port'.
health_check_port = "traffic-port"
- # The amount of time, in seconds, during which no response from a Target means a
- # failed health check. The acceptable range is 2 to 60 seconds.
+ # The amount of time, in seconds, during which no response from a Target means
+ # a failed health check. The acceptable range is 2 to 60 seconds.
health_check_timeout = 5
# The number of consecutive failed health checks required before considering a
- # target unhealthy. The acceptable range is 2 to 10. For NLBs, this value must be
- # the same as the health_check_healthy_threshold.
+ # target unhealthy. The acceptable range is 2 to 10. For NLBs, this value must
+ # be the same as the health_check_healthy_threshold.
health_check_unhealthy_threshold = 2
# The period, in seconds, over which to measure the CPU utilization percentage
high_cpu_utilization_period = 300
- # Trigger an alarm if the ECS Service has a CPU utilization percentage above this
- # threshold
+ # Trigger an alarm if the ECS Service has a CPU utilization percentage above
+ # this threshold
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must be
- # one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must
+ # be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_cpu_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the memory utilization percentage
+ # The period, in seconds, over which to measure the memory utilization
+ # percentage
high_memory_utilization_period = 300
- # Trigger an alarm if the ECS Service has a memory utilization percentage above
- # this threshold
+ # Trigger an alarm if the ECS Service has a memory utilization percentage
+ # above this threshold
high_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must be
- # one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must
+ # be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_memory_utilization_treat_missing_data = "missing"
- # The ID of the Route 53 hosted zone into which the Route 53 DNS record should be
- # written
+ # The ID of the Route 53 hosted zone into which the Route 53 DNS record should
+ # be written
hosted_zone_id = null
# An object defining the policy to attach to the ECS task. Accepts a map of
- # objects, where the map keys are sids for IAM policy statements, and the object
- # fields are the resources, actions, and the effect ("Allow" or "Deny") of the
- # statement.
+ # objects, where the map keys are sids for IAM policy statements, and the
+ # object fields are the resources, actions, and the effect ("Allow" or "Deny")
+ # of the statement.
iam_policy = null
- # The launch type of the ECS service. Must be one of EC2 or FARGATE. When using
- # FARGATE, you must set the network mode to awsvpc and configure it. When using
- # EC2, you can configure the placement strategy using the variables
+ # The launch type of the ECS service. Must be one of EC2 or FARGATE. When
+ # using FARGATE, you must set the network mode to awsvpc and configure it.
+ # When using EC2, you can configure the placement strategy using the variables
# var.placement_strategy_type, var.placement_strategy_field,
# var.placement_constraint_type, var.placement_constraint_expression. This
# variable is ignored if var.capacity_provider_strategy is provided.
launch_type = "EC2"
- # The ID of the Route 53 Hosted Zone in which to create a DNS A record pointed to
- # the ECS service's load balancer
+ # The ID of the Route 53 Hosted Zone in which to create a DNS A record pointed
+ # to the ECS service's load balancer
lb_hosted_zone_id = null
- # A map of tags to apply to the elb target group. Each item in this list should be
- # a map with the parameters key and value.
+ # A map of tags to apply to the elb target group. Each item in this list
+ # should be a map with the parameters key and value.
lb_target_group_tags = {}
# The maximum number of instances of the ECS Service to run. Auto scaling will
@@ -945,45 +957,45 @@ inputs = {
# never scale in below this number.
min_number_of_tasks = 1
- # The configuration to use when setting up the VPC network mode. Required and only
- # used if network_mode is awsvpc.
+ # The configuration to use when setting up the VPC network mode. Required and
+ # only used if network_mode is awsvpc.
network_configuration = null
# The Docker networking mode to use for the containers in the task. The valid
- # values are none, bridge, awsvpc, and host. If the network_mode is set to awsvpc,
- # you must configure var.network_configuration.
+ # values are none, bridge, awsvpc, and host. If the network_mode is set to
+ # awsvpc, you must configure var.network_configuration.
network_mode = "bridge"
# The DNS name that was assigned by AWS to the load balancer upon creation
original_lb_dns_name = null
- # Cluster Query Language expression to apply to the constraint for matching. Does
- # not need to be specified for the distinctInstance constraint type.
+ # Cluster Query Language expression to apply to the constraint for matching.
+ # Does not need to be specified for the distinctInstance constraint type.
placement_constraint_expression = "attribute:ecs.ami-id != 'ami-fake'"
- # The type of constraint to apply for container instance placement. The only valid
- # values at this time are memberOf and distinctInstance.
+ # The type of constraint to apply for container instance placement. The only
+ # valid values at this time are memberOf and distinctInstance.
placement_constraint_type = "memberOf"
# The field to apply the placement strategy against. For the spread placement
- # strategy, valid values are instanceId (or host, which has the same effect), or
- # any platform or custom attribute that is applied to a container instance, such
- # as attribute:ecs.availability-zone. For the binpack placement strategy, valid
- # values are cpu and memory. For the random placement strategy, this field is not
- # used.
+ # strategy, valid values are instanceId (or host, which has the same effect),
+ # or any platform or custom attribute that is applied to a container instance,
+ # such as attribute:ecs.availability-zone. For the binpack placement strategy,
+ # valid values are cpu and memory. For the random placement strategy, this
+ # field is not used.
placement_strategy_field = "cpu"
# The strategy to use when placing ECS tasks on EC2 instances. Can be binpack
# (default), random, or spread.
placement_strategy_type = "binpack"
- # Whether tags should be propogated to the tasks from the service or from the task
- # definition. Valid values are SERVICE and TASK_DEFINITION. Defaults to SERVICE.
- # If set to null, no tags are created for tasks.
+    # Whether tags should be propagated to the tasks from the service or from the
+ # task definition. Valid values are SERVICE and TASK_DEFINITION. Defaults to
+ # SERVICE. If set to null, no tags are created for tasks.
propagate_tags = "SERVICE"
- # Use the name of the Envoy proxy container from `container_definitions` as the
- # container name.
+ # Use the name of the Envoy proxy container from `container_definitions` as
+ # the container name.
proxy_configuration_container_name = null
# A map of network configuration parameters to provide the Container Network
@@ -992,9 +1004,9 @@ inputs = {
redirect_rules = {}
- # The path, without any leading slash, that can be used as a health check (e.g.
- # healthcheck) by Route 53. Should return a 200 OK when the service is up and
- # running.
+ # The path, without any leading slash, that can be used as a health check
+ # (e.g. healthcheck) by Route 53. Should return a 200 OK when the service is
+ # up and running.
route53_health_check_path = "/"
# The port to use for Route 53 health checks. This should be the port for the
@@ -1002,51 +1014,56 @@ inputs = {
# (var.domain_name).
route53_health_check_port = 80
- # The protocol to use for Route 53 health checks. Should be one of HTTP, HTTPS.
+ # The protocol to use for Route 53 health checks. Should be one of HTTP,
+ # HTTPS.
route53_health_check_protocol = "HTTP"
- # The optional external_id to be used in the us-east-1 provider block defined in
- # the route53-health-check-alarms module. This module configures its own AWS
- # provider to ensure resources are created in us-east-1.
+ # The optional external_id to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_external_id = null
- # The optional AWS profile to be used in the us-east-1 provider block defined in
- # the route53-health-check-alarms module. This module configures its own AWS
- # provider to ensure resources are created in us-east-1.
+ # The optional AWS profile to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_profile = null
- # The optional role_arn to be used in the us-east-1 provider block defined in the
- # route53-health-check-alarms module. This module configures its own AWS provider
- # to ensure resources are created in us-east-1.
- route53_health_check_provider_role_arn = null
-
- # The optional session_name to be used in the us-east-1 provider block defined in
+ # The optional role_arn to be used in the us-east-1 provider block defined in
# the route53-health-check-alarms module. This module configures its own AWS
# provider to ensure resources are created in us-east-1.
+ route53_health_check_provider_role_arn = null
+
+ # The optional session_name to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_session_name = null
# The optional path to a credentials file used in the us-east-1 provider block
- # defined in the route53-health-check-alarms module. This module configures its
- # own AWS provider to ensure resources are created in us-east-1.
+ # defined in the route53-health-check-alarms module. This module configures
+ # its own AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_shared_credentials_file = null
- # A list of ARNs of Secrets Manager secrets that the task should have permissions
- # to read. The IAM role for the task will be granted
+ # Define runtime platform options
+ runtime_platform = null
+
+ # A list of ARNs of Secrets Manager secrets that the task should have
+ # permissions to read. The IAM role for the task will be granted
# `secretsmanager:GetSecretValue` for each secret in the list. The ARN can be
# either the complete ARN, including the randomly generated suffix, or the ARN
# without the suffix. If the latter, the module will look up the full ARN
- # automatically. This is helpful in cases where you don't yet know the randomly
- # generated suffix because the rest of the ARN is a predictable value.
+ # automatically. This is helpful in cases where you don't yet know the
+ # randomly generated suffix because the rest of the ARN is a predictable
+ # value.
secrets_access = []
# A list of ARNs for Secrets Manager secrets that the ECS execution IAM policy
- # should be granted access to read. Note that this is different from the ECS task
- # IAM policy. The execution policy is concerned with permissions required to run
- # the ECS task. The ARN can be either the complete ARN, including the randomly
- # generated suffix, or the ARN without the suffix. If the latter, the module will
- # look up the full ARN automatically. This is helpful in cases where you don't yet
- # know the randomly generated suffix because the rest of the ARN is a predictable
- # value.
+ # should be granted access to read. Note that this is different from the ECS
+ # task IAM policy. The execution policy is concerned with permissions required
+ # to run the ECS task. The ARN can be either the complete ARN, including the
+ # randomly generated suffix, or the ARN without the suffix. If the latter, the
+ # module will look up the full ARN automatically. This is helpful in cases
+ # where you don't yet know the randomly generated suffix because the rest of
+ # the ARN is a predictable value.
secrets_manager_arns = []
# The ARN of the kms key associated with secrets manager
@@ -1054,32 +1071,32 @@ inputs = {
# The name of the aws_security_group that gets created if var.network_mode is
# awsvpc and custom rules are specified for the ECS Fargate worker via
- # var.network_configuration.security_group_rules. Defaults to var.service_name if
- # not specified.
+ # var.network_configuration.security_group_rules. Defaults to var.service_name
+ # if not specified.
service_security_group_name = null
- # A map of tags to apply to the ECS service. Each item in this list should be a
- # map with the parameters key and value.
+ # A map of tags to apply to the ECS service. Each item in this list should be
+ # a map with the parameters key and value.
service_tags = {}
# The CPU units for the instances that Fargate will spin up. Options here:
- # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html#far
- # ate-tasks-size. Required when using FARGATE launch type.
+ # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html#fargate-tasks-size.
+ # Required when using FARGATE launch type.
task_cpu = null
- # A map of tags to apply to the task definition. Each item in this list should be
- # a map with the parameters key and value.
+ # A map of tags to apply to the task definition. Each item in this list should
+ # be a map with the parameters key and value.
task_definition_tags = {}
# The memory units for the instances that Fargate will spin up. Options here:
- # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html#far
- # ate-tasks-size. Required when using FARGATE launch type.
+ # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html#fargate-tasks-size.
+ # Required when using FARGATE launch type.
task_memory = null
# If true, the ALB will use use Sticky Sessions as described at
- # https://goo.gl/VLcNbk. Only used if var.elb_target_groups is set. Note that this
- # can only be true when associating with an ALB. This cannot be used with CLBs or
- # NLBs.
+ # https://goo.gl/VLcNbk. Only used if var.elb_target_groups is set. Note that
+ # this can only be true when associating with an ALB. This cannot be used with
+ # CLBs or NLBs.
use_alb_sticky_sessions = false
# Whether or not to enable auto scaling for the ecs service
@@ -1089,10 +1106,11 @@ inputs = {
# this to true, you must supply var.custom_docker_command
use_custom_docker_run_command = false
- # (Optional) A map of volume blocks that containers in your task may use. The key
- # should be the name of the volume and the value should be a map compatible with
- # https://www.terraform.io/docs/providers/aws/r/ecs_task_definition.html#volume-bl
- # ck-arguments, but not including the name parameter.
+ # (Optional) A map of volume blocks that containers in your task may use. The
+ # key should be the name of the volume and the value should be a map
+ # compatible with
+ # https://www.terraform.io/docs/providers/aws/r/ecs_task_definition.html#volume-block-arguments,
+ # but not including the name parameter.
volumes = {}
}
@@ -2550,6 +2568,25 @@ The optional path to a credentials file used in the us-east-1 provider block def
+
+
+
+Define runtime platform options
+
+
+
+
+```hcl
+object({
+ operating_system_family = string
+ cpu_architecture = string
+ })
+```
+
+
+
+
+
@@ -2860,11 +2897,11 @@ The names of the ECS service's load balancer's target groups
diff --git a/docs/reference/services/app-orchestration/amazon-eks-core-services.md b/docs/reference/services/app-orchestration/amazon-eks-core-services.md
index a268d7cd4d..a20737bfd3 100644
--- a/docs/reference/services/app-orchestration/amazon-eks-core-services.md
+++ b/docs/reference/services/app-orchestration/amazon-eks-core-services.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon EKS Core Services
-View Source
+View Source
Release Notes
@@ -68,9 +68,9 @@ For information on each of the core services deployed by this service, see the d
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -78,7 +78,7 @@ For information on each of the core services deployed by this service, see the d
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -86,7 +86,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -108,7 +108,7 @@ If you want to deploy this repo in production, check out the following resources
module "eks_core_services" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-core-services?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-core-services?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -120,28 +120,29 @@ module "eks_core_services" {
# The name of the EKS cluster where the core services will be deployed into.
eks_cluster_name =
- # Configuration for using the IAM role with Service Accounts feature to provide
- # permissions to the applications. This expects a map with two properties:
- # `openid_connect_provider_arn` and `openid_connect_provider_url`. The
- # `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider for EKS
- # to retrieve IAM credentials, while `openid_connect_provider_url` is the URL. Set
- # to null if you do not wish to use IAM role with Service Accounts.
+ # Configuration for using the IAM role with Service Accounts feature to
+ # provide permissions to the applications. This expects a map with two
+ # properties: `openid_connect_provider_arn` and `openid_connect_provider_url`.
+ # The `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider
+ # for EKS to retrieve IAM credentials, while `openid_connect_provider_url` is
+ # the URL. Set to null if you do not wish to use IAM role with Service
+ # Accounts.
eks_iam_role_for_service_accounts_config =
- # ARN of IAM Role to use as the Pod execution role for Fargate. Required if any of
- # the services are being scheduled on Fargate. Set to null if none of the Pods are
- # being scheduled on Fargate.
+ # ARN of IAM Role to use as the Pod execution role for Fargate. Required if
+ # any of the services are being scheduled on Fargate. Set to null if none of
+ # the Pods are being scheduled on Fargate.
pod_execution_iam_role_arn =
# The ID of the VPC where the EKS cluster is deployed.
vpc_id =
- # The subnet IDs to use for EKS worker nodes. Used when provisioning Pods on to
- # Fargate. Required if any of the services are being scheduled on Fargate. Set to
- # empty list if none of the Pods are being scheduled on Fargate.
+ # The subnet IDs to use for EKS worker nodes. Used when provisioning Pods on
+ # to Fargate. Required if any of the services are being scheduled on Fargate.
+ # Set to empty list if none of the Pods are being scheduled on Fargate.
worker_vpc_subnet_ids =
# ----------------------------------------------------------------------------------------------------
@@ -151,8 +152,8 @@ module "eks_core_services" {
# The version of the aws-load-balancer-controller helmchart to use.
alb_ingress_controller_chart_version = "1.4.1"
- # The repository of the aws-load-balancer-controller docker image that should be
- # deployed.
+ # The repository of the aws-load-balancer-controller docker image that should
+ # be deployed.
alb_ingress_controller_docker_image_repo = "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller"
# The tag of the aws-load-balancer-controller docker image that should be
@@ -160,41 +161,43 @@ module "eks_core_services" {
alb_ingress_controller_docker_image_tag = "v2.4.1"
# Configure affinity rules for the ALB Ingress Controller Pod to control which
- # nodes to schedule on. Each item in the list should be a map with the keys `key`,
- # `values`, and `operator`, corresponding to the 3 properties of matchExpressions.
- # Note that all expressions must be satisfied to schedule on the node.
+ # nodes to schedule on. Each item in the list should be a map with the keys
+ # `key`, `values`, and `operator`, corresponding to the 3 properties of
+ # matchExpressions. Note that all expressions must be satisfied to schedule on
+ # the node.
alb_ingress_controller_pod_node_affinity = []
- # Configure tolerations rules to allow the ALB Ingress Controller Pod to schedule
- # on nodes that have been tainted. Each item in the list specifies a toleration
- # rule.
+ # Configure tolerations rules to allow the ALB Ingress Controller Pod to
+ # schedule on nodes that have been tainted. Each item in the list specifies a
+ # toleration rule.
alb_ingress_controller_pod_tolerations = []
- # Minimum time to wait after a scale up event before any node is considered for
- # scale down.
+ # Minimum time to wait after a scale up event before any node is considered
+ # for scale down.
autoscaler_down_delay_after_add = "10m"
# Number for the log level verbosity. Lower numbers are less verbose, higher
# numbers are more verbose. (Default: 4)
autoscaler_log_level_verbosity = 4
- # Minimum time to wait since the node became unused before the node is considered
- # for scale down by the autoscaler.
+ # Minimum time to wait since the node became unused before the node is
+ # considered for scale down by the autoscaler.
autoscaler_scale_down_unneeded_time = "10m"
- # If true cluster autoscaler will never delete nodes with pods with local storage,
- # e.g. EmptyDir or HostPath
+ # If true cluster autoscaler will never delete nodes with pods with local
+ # storage, e.g. EmptyDir or HostPath
autoscaler_skip_nodes_with_local_storage = true
- # The Container repository to use for looking up the cloudwatch-agent Container
- # image when deploying the pods. When null, uses the default repository set in the
- # chart. Only applies to non-fargate workers.
+ # The Container repository to use for looking up the cloudwatch-agent
+ # Container image when deploying the pods. When null, uses the default
+ # repository set in the chart. Only applies to non-fargate workers.
aws_cloudwatch_agent_image_repository = null
- # Configure affinity rules for the AWS CloudWatch Agent Pod to control which nodes
- # to schedule on. Each item in the list should be a map with the keys `key`,
- # `values`, and `operator`, corresponding to the 3 properties of matchExpressions.
- # Note that all expressions must be satisfied to schedule on the node.
+ # Configure affinity rules for the AWS CloudWatch Agent Pod to control which
+ # nodes to schedule on. Each item in the list should be a map with the keys
+ # `key`, `values`, and `operator`, corresponding to the 3 properties of
+ # matchExpressions. Note that all expressions must be satisfied to schedule on
+ # the node.
aws_cloudwatch_agent_pod_node_affinity = []
# Pod resource requests and limits to use. Refer to
@@ -202,13 +205,13 @@ module "eks_core_services" {
# for more information.
aws_cloudwatch_agent_pod_resources = null
- # Configure tolerations rules to allow the AWS CloudWatch Agent Pods to schedule
- # on nodes that have been tainted. Each item in the list specifies a toleration
- # rule.
+ # Configure tolerations rules to allow the AWS CloudWatch Agent Pods to
+ # schedule on nodes that have been tainted. Each item in the list specifies a
+ # toleration rule.
aws_cloudwatch_agent_pod_tolerations = []
- # Which version of amazon/cloudwatch-agent to install. When null, uses the default
- # version set in the chart. Only applies to non-fargate workers.
+ # Which version of amazon/cloudwatch-agent to install. When null, uses the
+ # default version set in the chart. Only applies to non-fargate workers.
aws_cloudwatch_agent_version = null
# Annotations to apply to the cluster autoscaler pod(s), as key value pairs.
@@ -217,10 +220,11 @@ module "eks_core_services" {
# Labels to apply to the cluster autoscaler pod(s), as key value pairs.
cluster_autoscaler_pod_labels = {}
- # Configure affinity rules for the cluster-autoscaler Pod to control which nodes
- # to schedule on. Each item in the list should be a map with the keys `key`,
- # `values`, and `operator`, corresponding to the 3 properties of matchExpressions.
- # Note that all expressions must be satisfied to schedule on the node.
+ # Configure affinity rules for the cluster-autoscaler Pod to control which
+ # nodes to schedule on. Each item in the list should be a map with the keys
+ # `key`, `values`, and `operator`, corresponding to the 3 properties of
+ # matchExpressions. Note that all expressions must be satisfied to schedule on
+ # the node.
cluster_autoscaler_pod_node_affinity = []
# Pod resource requests and limits to use. Refer to
@@ -229,12 +233,13 @@ module "eks_core_services" {
# availability for Fargate, which defaults to 0.25 vCPU and 256MB RAM.
cluster_autoscaler_pod_resources = {"limits":{"cpu":"250m","memory":"1024Mi"},"requests":{"cpu":"250m","memory":"1024Mi"}}
- # Configure tolerations rules to allow the cluster-autoscaler Pod to schedule on
- # nodes that have been tainted. Each item in the list specifies a toleration rule.
+ # Configure tolerations rules to allow the cluster-autoscaler Pod to schedule
+ # on nodes that have been tainted. Each item in the list specifies a
+ # toleration rule.
cluster_autoscaler_pod_tolerations = []
- # The name to use for the helm release for cluster-autoscaler. This is useful to
- # force a redeployment of the cluster-autoscaler component.
+ # The name to use for the helm release for cluster-autoscaler. This is useful
+ # to force a redeployment of the cluster-autoscaler component.
cluster_autoscaler_release_name = "cluster-autoscaler"
# Which docker repository to use to install the cluster autoscaler. Check the
@@ -242,8 +247,8 @@ module "eks_core_services" {
# https://github.com/kubernetes/autoscaler/releases
cluster_autoscaler_repository = "us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler"
- # Specifies an 'expander' for the cluster autoscaler. This helps determine which
- # ASG to scale when additional resource capacity is needed.
+ # Specifies an 'expander' for the cluster autoscaler. This helps determine
+ # which ASG to scale when additional resource capacity is needed.
cluster_autoscaler_scaling_strategy = "least-waste"
# Which version of the cluster autoscaler to install. This should match the
@@ -255,33 +260,37 @@ module "eks_core_services" {
# Whether or not to enable the AWS LB Ingress controller.
enable_alb_ingress_controller = true
- # Whether to enable the AWS CloudWatch Agent DaemonSet for collecting container
- # and node metrics from worker nodes (self-managed ASG or managed node groups).
+ # Whether to enable the AWS CloudWatch Agent DaemonSet for collecting
+ # container and node metrics from worker nodes (self-managed ASG or managed
+ # node groups).
enable_aws_cloudwatch_agent = true
- # Whether or not to enable cluster-autoscaler for Autoscaling EKS worker nodes.
+ # Whether or not to enable cluster-autoscaler for Autoscaling EKS worker
+ # nodes.
enable_cluster_autoscaler = true
- # Whether or not to enable external-dns for DNS entry syncing with Route 53 for
- # Services and Ingresses.
+ # Whether or not to enable external-dns for DNS entry syncing with Route 53
+ # for Services and Ingresses.
enable_external_dns = true
- # Whether or not to enable fluent-bit on EKS Fargate workers for log aggregation.
+ # Whether or not to enable fluent-bit on EKS Fargate workers for log
+ # aggregation.
enable_fargate_fluent_bit = true
# Whether or not to enable fluent-bit for log aggregation.
enable_fluent_bit = true
# Duration string (e.g. 1m) indicating the interval between making changes to
- # Route 53 by external-dns. When null, use the default defined in the chart (1s).
+ # Route 53 by external-dns. When null, use the default defined in the chart
+ # (1s).
external_dns_batch_change_interval = null
- # The maximum number of changes that should be applied in a batch by external-dns.
- # When null, use the default defined in the chart (1000).
+ # The maximum number of changes that should be applied in a batch by
+ # external-dns. When null, use the default defined in the chart (1000).
external_dns_batch_change_size = null
- # Name of the Helm chart for external-dns. This should usually be 'external-dns'
- # but may differ in the case of overriding the repository URL.
+ # Name of the Helm chart for external-dns. This should usually be
+ # 'external-dns' but may differ in the case of overriding the repository URL.
external_dns_chart_name = "external-dns"
# Helm chart repository URL to obtain the external-dns chart from. Useful when
@@ -295,21 +304,24 @@ module "eks_core_services" {
# Configure affinity rules for the external-dns Pod to control which nodes to
# schedule on. Each item in the list should be a map with the keys `key`,
- # `values`, and `operator`, corresponding to the 3 properties of matchExpressions.
- # Note that all expressions must be satisfied to schedule on the node.
+ # `values`, and `operator`, corresponding to the 3 properties of
+ # matchExpressions. Note that all expressions must be satisfied to schedule on
+ # the node.
external_dns_pod_node_affinity = []
- # Configure tolerations rules to allow the external-dns Pod to schedule on nodes
- # that have been tainted. Each item in the list specifies a toleration rule.
+ # Configure tolerations rules to allow the external-dns Pod to schedule on
+ # nodes that have been tainted. Each item in the list specifies a toleration
+ # rule.
external_dns_pod_tolerations = []
# Duration string (e.g. 1m) indicating the polling interval for syncing the
- # domains by external-dns. When null, use the default defined in the chart (1m).
+ # domains by external-dns. When null, use the default defined in the chart
+ # (1m).
external_dns_poll_interval = null
- # Only create records in hosted zones that match the provided domain names. Empty
- # list (default) means match all zones. Zones must satisfy all three constraints
- # (var.external_dns_route53_hosted_zone_tag_filters,
+ # Only create records in hosted zones that match the provided domain names.
+ # Empty list (default) means match all zones. Zones must satisfy all three
+ # constraints (var.external_dns_route53_hosted_zone_tag_filters,
# var.external_dns_route53_hosted_zone_id_filters, and
# var.external_dns_route53_hosted_zone_domain_filters).
external_dns_route53_hosted_zone_domain_filters = []
@@ -321,17 +333,17 @@ module "eks_core_services" {
# var.external_dns_route53_hosted_zone_domain_filters).
external_dns_route53_hosted_zone_id_filters = []
- # Only create records in hosted zones that match the provided tags. Each item in
- # the list should specify tag key and tag value as a map. Empty list (default)
- # means match all zones. Zones must satisfy all three constraints
+ # Only create records in hosted zones that match the provided tags. Each item
+ # in the list should specify tag key and tag value as a map. Empty list
+ # (default) means match all zones. Zones must satisfy all three constraints
# (var.external_dns_route53_hosted_zone_tag_filters,
# var.external_dns_route53_hosted_zone_id_filters, and
# var.external_dns_route53_hosted_zone_domain_filters).
external_dns_route53_hosted_zone_tag_filters = []
# Duration string (e.g. 1m) indicating the amount of time the Hosted Zones are
- # cached in external-dns. When null, use the default defined in the chart (0 - no
- # caching).
+ # cached in external-dns. When null, use the default defined in the chart (0 -
+ # no caching).
external_dns_route53_zones_cache_duration = null
# K8s resources type to be observed for new DNS entries by ExternalDNS.
@@ -341,52 +353,47 @@ module "eks_core_services" {
# (optional, in addition of regular interval)
external_dns_trigger_loop_on_event = false
- # List of ARNs of Fargate execution IAM Roles that should get permissions to ship
- # logs using fluent-bit. This must be provided if enable_fargate_fluent_bit is
- # true.
+ # List of ARNs of Fargate execution IAM Roles that should get permissions to
+ # ship logs using fluent-bit. This must be provided if
+ # enable_fargate_fluent_bit is true.
fargate_fluent_bit_execution_iam_role_arns = []
# Additional filters that fluent-bit should apply to log output. This string
# should be formatted according to the Fluent-bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configur
- # tion-file#config_filter).
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_filter).
fargate_fluent_bit_extra_filters = ""
- # Additional parsers that fluent-bit should export logs to. This string should be
- # formatted according to the Fluent-bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configur
- # tion-file#config_output).
+ # Additional parsers that fluent-bit should export logs to. This string should
+ # be formatted according to the Fluent-bit docs
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_output).
fargate_fluent_bit_extra_parsers = ""
# Whether or not Kubernetes metadata is added to the log files
fargate_fluent_bit_include_kubernetes_metadata = true
- # Prefix string to use for the CloudWatch Log Stream that gets created for each
- # Fargate pod.
+ # Prefix string to use for the CloudWatch Log Stream that gets created for
+ # each Fargate pod.
fargate_fluent_bit_log_stream_prefix = "fargate"
- # A list of availability zones in the region that we CANNOT use to deploy the EKS
- # Fargate workers. You can use this to avoid availability zones that may not be
- # able to provision the resources (e.g ran out of capacity). If empty, will allow
- # all availability zones.
+ # A list of availability zones in the region that we CANNOT use to deploy the
+ # EKS Fargate workers. You can use this to avoid availability zones that may
+ # not be able to provision the resources (e.g. ran out of capacity). If empty,
+ # will allow all availability zones.
fargate_worker_disallowed_availability_zones = ["us-east-1d","us-east-1e","ca-central-1d"]
# Can be used to add more inputs. This string should be formatted according to
# Fluent Bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-
- # ode/configuration-file#config_input).
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file#config_input).
fluent_bit_additional_inputs = ""
# Additional filters that fluent-bit should apply to log output. This string
# should be formatted according to the Fluent-bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configur
- # tion-file#config_filter).
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_filter).
fluent_bit_extra_filters = ""
# Additional output streams that fluent-bit should export logs to. This string
# should be formatted according to the Fluent-bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configur
- # tion-file#config_output).
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_output).
fluent_bit_extra_outputs = ""
# Can be used to add additional log parsers. This string should be formatted
@@ -394,62 +401,64 @@ module "eks_core_services" {
# fluent-bit.conf file.
fluent_bit_extra_parsers = ""
- # The Container repository to use for looking up the aws-for-fluent-bit Container
- # image when deploying the pods. When null, uses the default repository set in the
- # chart. Only applies to non-fargate workers.
+ # The Container repository to use for looking up the aws-for-fluent-bit
+ # Container image when deploying the pods. When null, uses the default
+ # repository set in the chart. Only applies to non-fargate workers.
fluent_bit_image_repository = null
- # If set to true, that means that the CloudWatch Log Group fluent-bit should use
- # for streaming logs already exists and does not need to be created.
+ # If set to true, that means that the CloudWatch Log Group fluent-bit should
+ # use for streaming logs already exists and does not need to be created.
fluent_bit_log_group_already_exists = false
- # The ARN of the KMS key to use to encrypt the logs in the CloudWatch Log Group
- # used for storing container logs streamed with FluentBit. Set to null to disable
- # encryption.
+ # The ARN of the KMS key to use to encrypt the logs in the CloudWatch Log
+ # Group used for storing container logs streamed with FluentBit. Set to null
+ # to disable encryption.
fluent_bit_log_group_kms_key_id = null
- # Name of the CloudWatch Log Group fluent-bit should use to stream logs to. When
- # null (default), uses the eks_cluster_name as the Log Group name.
+ # Name of the CloudWatch Log Group fluent-bit should use to stream logs to.
+ # When null (default), uses the eks_cluster_name as the Log Group name.
fluent_bit_log_group_name = null
- # number of days to retain log events. Possible values are: 1, 3, 5, 7, 14, 30,
- # 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. Select 0 to never
- # expire.
+ # Number of days to retain log events. Possible values are: 1, 3, 5, 7, 14,
+ # 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. Select 0
+ # to never expire.
fluent_bit_log_group_retention = 0
- # ARN of the lambda function to trigger when events arrive at the fluent bit log
- # group.
+ # ARN of the lambda function to trigger when events arrive at the fluent bit
+ # log group.
fluent_bit_log_group_subscription_arn = null
# Filter pattern for the CloudWatch subscription. Only used if
# var.fluent_bit_log_group_subscription_arn is set.
fluent_bit_log_group_subscription_filter = ""
- # Prefix string to use for the CloudWatch Log Stream that gets created for each
- # pod. When null (default), the prefix is set to 'fluentbit'.
+ # Prefix string to use for the CloudWatch Log Stream that gets created for
+ # each pod. When null (default), the prefix is set to 'fluentbit'.
fluent_bit_log_stream_prefix = null
# Configure affinity rules for the fluent-bit Pods to control which nodes to
# schedule on. Each item in the list should be a map with the keys `key`,
- # `values`, and `operator`, corresponding to the 3 properties of matchExpressions.
- # Note that all expressions must be satisfied to schedule on the node.
+ # `values`, and `operator`, corresponding to the 3 properties of
+ # matchExpressions. Note that all expressions must be satisfied to schedule on
+ # the node.
fluent_bit_pod_node_affinity = []
- # Configure tolerations rules to allow the fluent-bit Pods to schedule on nodes
- # that have been tainted. Each item in the list specifies a toleration rule.
+ # Configure tolerations rules to allow the fluent-bit Pods to schedule on
+ # nodes that have been tainted. Each item in the list specifies a toleration
+ # rule.
fluent_bit_pod_tolerations = []
- # Optionally use a cri parser instead of the default Docker parser. This should be
- # used for EKS v1.24 and later.
+ # Optionally use a cri parser instead of the default Docker parser. This
+ # should be used for EKS v1.24 and later.
fluent_bit_use_cri_parser_conf = true
# Which version of aws-for-fluent-bit to install. When null, uses the default
# version set in the chart. Only applies to non-fargate workers.
fluent_bit_version = null
- # A map of PriorityClass configurations, with the key as the PriorityClass name.
- # https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/
- # priorityclass
+ # A map of PriorityClass configurations, with the key as the PriorityClass
+ # name.
+ # https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
kubernetes_priority_classes = {}
# Policy for how DNS records are sychronized between sources and providers
@@ -461,42 +470,43 @@ module "eks_core_services" {
# When true, the cluster autoscaler pods will be scheduled on Fargate. It is
# recommended to run the cluster autoscaler on Fargate to avoid the autoscaler
- # scaling down a node where it is running (and thus shutting itself down during a
- # scale down event). However, since Fargate is only supported on a handful of
- # regions, we don't default to true here.
+ # scaling down a node where it is running (and thus shutting itself down
+ # during a scale down event). However, since Fargate is only supported on a
+ # handful of regions, we don't default to true here.
schedule_cluster_autoscaler_on_fargate = false
# When true, the external-dns pods will be scheduled on Fargate.
schedule_external_dns_on_fargate = false
- # Configure Kubernetes Services to lookup external DNS records. This can be useful
- # to bind friendly internal service names to domains (e.g. the RDS database
- # endpoint).
+ # Configure Kubernetes Services to lookup external DNS records. This can be
+ # useful to bind friendly internal service names to domains (e.g. the RDS
+ # database endpoint).
service_dns_mappings = {}
- # If this variable is set to true, then use an exec-based plugin to authenticate
- # and fetch tokens for EKS. This is useful because EKS clusters use short-lived
- # authentication tokens that can expire in the middle of an 'apply' or 'destroy',
- # and since the native Kubernetes provider in Terraform doesn't have a way to
- # fetch up-to-date tokens, we recommend using an exec-based provider as a
- # workaround. Use the use_kubergrunt_to_fetch_token input variable to control
- # whether kubergrunt or aws is used to fetch tokens.
+ # If this variable is set to true, then use an exec-based plugin to
+ # authenticate and fetch tokens for EKS. This is useful because EKS clusters
+ # use short-lived authentication tokens that can expire in the middle of an
+ # 'apply' or 'destroy', and since the native Kubernetes provider in Terraform
+ # doesn't have a way to fetch up-to-date tokens, we recommend using an
+ # exec-based provider as a workaround. Use the use_kubergrunt_to_fetch_token
+ # input variable to control whether kubergrunt or aws is used to fetch tokens.
use_exec_plugin_for_auth = true
- # EKS clusters use short-lived authentication tokens that can expire in the middle
- # of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based plugin to
- # fetch an up-to-date token. If this variable is set to true, we'll use kubergrunt
- # to fetch the token (in which case, kubergrunt must be installed and on PATH); if
- # this variable is set to false, we'll use the aws CLI to fetch the token (in
- # which case, aws must be installed and on PATH). Note this functionality is only
- # enabled if use_exec_plugin_for_auth is set to true.
+ # EKS clusters use short-lived authentication tokens that can expire in the
+ # middle of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based
+ # plugin to fetch an up-to-date token. If this variable is set to true, we'll
+ # use kubergrunt to fetch the token (in which case, kubergrunt must be
+ # installed and on PATH); if this variable is set to false, we'll use the aws
+ # CLI to fetch the token (in which case, aws must be installed and on PATH).
+ # Note this functionality is only enabled if use_exec_plugin_for_auth is set
+ # to true.
use_kubergrunt_to_fetch_token = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
}
@@ -514,7 +524,7 @@ module "eks_core_services" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-core-services?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-core-services?ref=v0.104.12"
}
inputs = {
@@ -529,28 +539,29 @@ inputs = {
# The name of the EKS cluster where the core services will be deployed into.
eks_cluster_name =
- # Configuration for using the IAM role with Service Accounts feature to provide
- # permissions to the applications. This expects a map with two properties:
- # `openid_connect_provider_arn` and `openid_connect_provider_url`. The
- # `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider for EKS
- # to retrieve IAM credentials, while `openid_connect_provider_url` is the URL. Set
- # to null if you do not wish to use IAM role with Service Accounts.
+ # Configuration for using the IAM role with Service Accounts feature to
+ # provide permissions to the applications. This expects a map with two
+ # properties: `openid_connect_provider_arn` and `openid_connect_provider_url`.
+ # The `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider
+ # for EKS to retrieve IAM credentials, while `openid_connect_provider_url` is
+ # the URL. Set to null if you do not wish to use IAM role with Service
+ # Accounts.
eks_iam_role_for_service_accounts_config =
- # ARN of IAM Role to use as the Pod execution role for Fargate. Required if any of
- # the services are being scheduled on Fargate. Set to null if none of the Pods are
- # being scheduled on Fargate.
+ # ARN of IAM Role to use as the Pod execution role for Fargate. Required if
+ # any of the services are being scheduled on Fargate. Set to null if none of
+ # the Pods are being scheduled on Fargate.
pod_execution_iam_role_arn =
# The ID of the VPC where the EKS cluster is deployed.
vpc_id =
- # The subnet IDs to use for EKS worker nodes. Used when provisioning Pods on to
- # Fargate. Required if any of the services are being scheduled on Fargate. Set to
- # empty list if none of the Pods are being scheduled on Fargate.
+ # The subnet IDs to use for EKS worker nodes. Used when provisioning Pods on
+ # to Fargate. Required if any of the services are being scheduled on Fargate.
+ # Set to empty list if none of the Pods are being scheduled on Fargate.
worker_vpc_subnet_ids =
# ----------------------------------------------------------------------------------------------------
@@ -560,8 +571,8 @@ inputs = {
# The version of the aws-load-balancer-controller helmchart to use.
alb_ingress_controller_chart_version = "1.4.1"
- # The repository of the aws-load-balancer-controller docker image that should be
- # deployed.
+ # The repository of the aws-load-balancer-controller docker image that should
+ # be deployed.
alb_ingress_controller_docker_image_repo = "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller"
# The tag of the aws-load-balancer-controller docker image that should be
@@ -569,41 +580,43 @@ inputs = {
alb_ingress_controller_docker_image_tag = "v2.4.1"
# Configure affinity rules for the ALB Ingress Controller Pod to control which
- # nodes to schedule on. Each item in the list should be a map with the keys `key`,
- # `values`, and `operator`, corresponding to the 3 properties of matchExpressions.
- # Note that all expressions must be satisfied to schedule on the node.
+ # nodes to schedule on. Each item in the list should be a map with the keys
+ # `key`, `values`, and `operator`, corresponding to the 3 properties of
+ # matchExpressions. Note that all expressions must be satisfied to schedule on
+ # the node.
alb_ingress_controller_pod_node_affinity = []
- # Configure tolerations rules to allow the ALB Ingress Controller Pod to schedule
- # on nodes that have been tainted. Each item in the list specifies a toleration
- # rule.
+ # Configure tolerations rules to allow the ALB Ingress Controller Pod to
+ # schedule on nodes that have been tainted. Each item in the list specifies a
+ # toleration rule.
alb_ingress_controller_pod_tolerations = []
- # Minimum time to wait after a scale up event before any node is considered for
- # scale down.
+ # Minimum time to wait after a scale up event before any node is considered
+ # for scale down.
autoscaler_down_delay_after_add = "10m"
# Number for the log level verbosity. Lower numbers are less verbose, higher
# numbers are more verbose. (Default: 4)
autoscaler_log_level_verbosity = 4
- # Minimum time to wait since the node became unused before the node is considered
- # for scale down by the autoscaler.
+ # Minimum time to wait since the node became unused before the node is
+ # considered for scale down by the autoscaler.
autoscaler_scale_down_unneeded_time = "10m"
- # If true cluster autoscaler will never delete nodes with pods with local storage,
- # e.g. EmptyDir or HostPath
+ # If true cluster autoscaler will never delete nodes with pods with local
+ # storage, e.g. EmptyDir or HostPath
autoscaler_skip_nodes_with_local_storage = true
- # The Container repository to use for looking up the cloudwatch-agent Container
- # image when deploying the pods. When null, uses the default repository set in the
- # chart. Only applies to non-fargate workers.
+ # The Container repository to use for looking up the cloudwatch-agent
+ # Container image when deploying the pods. When null, uses the default
+ # repository set in the chart. Only applies to non-fargate workers.
aws_cloudwatch_agent_image_repository = null
- # Configure affinity rules for the AWS CloudWatch Agent Pod to control which nodes
- # to schedule on. Each item in the list should be a map with the keys `key`,
- # `values`, and `operator`, corresponding to the 3 properties of matchExpressions.
- # Note that all expressions must be satisfied to schedule on the node.
+ # Configure affinity rules for the AWS CloudWatch Agent Pod to control which
+ # nodes to schedule on. Each item in the list should be a map with the keys
+ # `key`, `values`, and `operator`, corresponding to the 3 properties of
+ # matchExpressions. Note that all expressions must be satisfied to schedule on
+ # the node.
aws_cloudwatch_agent_pod_node_affinity = []
# Pod resource requests and limits to use. Refer to
@@ -611,13 +624,13 @@ inputs = {
# for more information.
aws_cloudwatch_agent_pod_resources = null
- # Configure tolerations rules to allow the AWS CloudWatch Agent Pods to schedule
- # on nodes that have been tainted. Each item in the list specifies a toleration
- # rule.
+ # Configure tolerations rules to allow the AWS CloudWatch Agent Pods to
+ # schedule on nodes that have been tainted. Each item in the list specifies a
+ # toleration rule.
aws_cloudwatch_agent_pod_tolerations = []
- # Which version of amazon/cloudwatch-agent to install. When null, uses the default
- # version set in the chart. Only applies to non-fargate workers.
+ # Which version of amazon/cloudwatch-agent to install. When null, uses the
+ # default version set in the chart. Only applies to non-fargate workers.
aws_cloudwatch_agent_version = null
# Annotations to apply to the cluster autoscaler pod(s), as key value pairs.
@@ -626,10 +639,11 @@ inputs = {
# Labels to apply to the cluster autoscaler pod(s), as key value pairs.
cluster_autoscaler_pod_labels = {}
- # Configure affinity rules for the cluster-autoscaler Pod to control which nodes
- # to schedule on. Each item in the list should be a map with the keys `key`,
- # `values`, and `operator`, corresponding to the 3 properties of matchExpressions.
- # Note that all expressions must be satisfied to schedule on the node.
+ # Configure affinity rules for the cluster-autoscaler Pod to control which
+ # nodes to schedule on. Each item in the list should be a map with the keys
+ # `key`, `values`, and `operator`, corresponding to the 3 properties of
+ # matchExpressions. Note that all expressions must be satisfied to schedule on
+ # the node.
cluster_autoscaler_pod_node_affinity = []
# Pod resource requests and limits to use. Refer to
@@ -638,12 +652,13 @@ inputs = {
# availability for Fargate, which defaults to 0.25 vCPU and 256MB RAM.
cluster_autoscaler_pod_resources = {"limits":{"cpu":"250m","memory":"1024Mi"},"requests":{"cpu":"250m","memory":"1024Mi"}}
- # Configure tolerations rules to allow the cluster-autoscaler Pod to schedule on
- # nodes that have been tainted. Each item in the list specifies a toleration rule.
+ # Configure tolerations rules to allow the cluster-autoscaler Pod to schedule
+ # on nodes that have been tainted. Each item in the list specifies a
+ # toleration rule.
cluster_autoscaler_pod_tolerations = []
- # The name to use for the helm release for cluster-autoscaler. This is useful to
- # force a redeployment of the cluster-autoscaler component.
+ # The name to use for the helm release for cluster-autoscaler. This is useful
+ # to force a redeployment of the cluster-autoscaler component.
cluster_autoscaler_release_name = "cluster-autoscaler"
# Which docker repository to use to install the cluster autoscaler. Check the
@@ -651,8 +666,8 @@ inputs = {
# https://github.com/kubernetes/autoscaler/releases
cluster_autoscaler_repository = "us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler"
- # Specifies an 'expander' for the cluster autoscaler. This helps determine which
- # ASG to scale when additional resource capacity is needed.
+ # Specifies an 'expander' for the cluster autoscaler. This helps determine
+ # which ASG to scale when additional resource capacity is needed.
cluster_autoscaler_scaling_strategy = "least-waste"
# Which version of the cluster autoscaler to install. This should match the
@@ -664,33 +679,37 @@ inputs = {
# Whether or not to enable the AWS LB Ingress controller.
enable_alb_ingress_controller = true
- # Whether to enable the AWS CloudWatch Agent DaemonSet for collecting container
- # and node metrics from worker nodes (self-managed ASG or managed node groups).
+ # Whether to enable the AWS CloudWatch Agent DaemonSet for collecting
+ # container and node metrics from worker nodes (self-managed ASG or managed
+ # node groups).
enable_aws_cloudwatch_agent = true
- # Whether or not to enable cluster-autoscaler for Autoscaling EKS worker nodes.
+ # Whether or not to enable cluster-autoscaler for Autoscaling EKS worker
+ # nodes.
enable_cluster_autoscaler = true
- # Whether or not to enable external-dns for DNS entry syncing with Route 53 for
- # Services and Ingresses.
+ # Whether or not to enable external-dns for DNS entry syncing with Route 53
+ # for Services and Ingresses.
enable_external_dns = true
- # Whether or not to enable fluent-bit on EKS Fargate workers for log aggregation.
+ # Whether or not to enable fluent-bit on EKS Fargate workers for log
+ # aggregation.
enable_fargate_fluent_bit = true
# Whether or not to enable fluent-bit for log aggregation.
enable_fluent_bit = true
# Duration string (e.g. 1m) indicating the interval between making changes to
- # Route 53 by external-dns. When null, use the default defined in the chart (1s).
+ # Route 53 by external-dns. When null, use the default defined in the chart
+ # (1s).
external_dns_batch_change_interval = null
- # The maximum number of changes that should be applied in a batch by external-dns.
- # When null, use the default defined in the chart (1000).
+ # The maximum number of changes that should be applied in a batch by
+ # external-dns. When null, use the default defined in the chart (1000).
external_dns_batch_change_size = null
- # Name of the Helm chart for external-dns. This should usually be 'external-dns'
- # but may differ in the case of overriding the repository URL.
+ # Name of the Helm chart for external-dns. This should usually be
+ # 'external-dns' but may differ in the case of overriding the repository URL.
external_dns_chart_name = "external-dns"
# Helm chart repository URL to obtain the external-dns chart from. Useful when
@@ -704,21 +723,24 @@ inputs = {
# Configure affinity rules for the external-dns Pod to control which nodes to
# schedule on. Each item in the list should be a map with the keys `key`,
- # `values`, and `operator`, corresponding to the 3 properties of matchExpressions.
- # Note that all expressions must be satisfied to schedule on the node.
+ # `values`, and `operator`, corresponding to the 3 properties of
+ # matchExpressions. Note that all expressions must be satisfied to schedule on
+ # the node.
external_dns_pod_node_affinity = []
- # Configure tolerations rules to allow the external-dns Pod to schedule on nodes
- # that have been tainted. Each item in the list specifies a toleration rule.
+ # Configure tolerations rules to allow the external-dns Pod to schedule on
+ # nodes that have been tainted. Each item in the list specifies a toleration
+ # rule.
external_dns_pod_tolerations = []
# Duration string (e.g. 1m) indicating the polling interval for syncing the
- # domains by external-dns. When null, use the default defined in the chart (1m).
+ # domains by external-dns. When null, use the default defined in the chart
+ # (1m).
external_dns_poll_interval = null
- # Only create records in hosted zones that match the provided domain names. Empty
- # list (default) means match all zones. Zones must satisfy all three constraints
- # (var.external_dns_route53_hosted_zone_tag_filters,
+ # Only create records in hosted zones that match the provided domain names.
+ # Empty list (default) means match all zones. Zones must satisfy all three
+ # constraints (var.external_dns_route53_hosted_zone_tag_filters,
# var.external_dns_route53_hosted_zone_id_filters, and
# var.external_dns_route53_hosted_zone_domain_filters).
external_dns_route53_hosted_zone_domain_filters = []
@@ -730,17 +752,17 @@ inputs = {
# var.external_dns_route53_hosted_zone_domain_filters).
external_dns_route53_hosted_zone_id_filters = []
- # Only create records in hosted zones that match the provided tags. Each item in
- # the list should specify tag key and tag value as a map. Empty list (default)
- # means match all zones. Zones must satisfy all three constraints
+ # Only create records in hosted zones that match the provided tags. Each item
+ # in the list should specify tag key and tag value as a map. Empty list
+ # (default) means match all zones. Zones must satisfy all three constraints
# (var.external_dns_route53_hosted_zone_tag_filters,
# var.external_dns_route53_hosted_zone_id_filters, and
# var.external_dns_route53_hosted_zone_domain_filters).
external_dns_route53_hosted_zone_tag_filters = []
# Duration string (e.g. 1m) indicating the amount of time the Hosted Zones are
- # cached in external-dns. When null, use the default defined in the chart (0 - no
- # caching).
+ # cached in external-dns. When null, use the default defined in the chart (0 -
+ # no caching).
external_dns_route53_zones_cache_duration = null
# K8s resources type to be observed for new DNS entries by ExternalDNS.
@@ -750,52 +772,47 @@ inputs = {
 # (optional, in addition to regular interval)
external_dns_trigger_loop_on_event = false
- # List of ARNs of Fargate execution IAM Roles that should get permissions to ship
- # logs using fluent-bit. This must be provided if enable_fargate_fluent_bit is
- # true.
+ # List of ARNs of Fargate execution IAM Roles that should get permissions to
+ # ship logs using fluent-bit. This must be provided if
+ # enable_fargate_fluent_bit is true.
fargate_fluent_bit_execution_iam_role_arns = []
# Additional filters that fluent-bit should apply to log output. This string
# should be formatted according to the Fluent-bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configur
- # tion-file#config_filter).
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_filter).
fargate_fluent_bit_extra_filters = ""
- # Additional parsers that fluent-bit should export logs to. This string should be
- # formatted according to the Fluent-bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configur
- # tion-file#config_output).
+ # Additional parsers that fluent-bit should export logs to. This string should
+ # be formatted according to the Fluent-bit docs
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_output).
fargate_fluent_bit_extra_parsers = ""
# Whether or not Kubernetes metadata is added to the log files
fargate_fluent_bit_include_kubernetes_metadata = true
- # Prefix string to use for the CloudWatch Log Stream that gets created for each
- # Fargate pod.
+ # Prefix string to use for the CloudWatch Log Stream that gets created for
+ # each Fargate pod.
fargate_fluent_bit_log_stream_prefix = "fargate"
- # A list of availability zones in the region that we CANNOT use to deploy the EKS
- # Fargate workers. You can use this to avoid availability zones that may not be
- # able to provision the resources (e.g ran out of capacity). If empty, will allow
- # all availability zones.
+ # A list of availability zones in the region that we CANNOT use to deploy the
+ # EKS Fargate workers. You can use this to avoid availability zones that may
+ # not be able to provision the resources (e.g. ran out of capacity). If empty,
+ # will allow all availability zones.
fargate_worker_disallowed_availability_zones = ["us-east-1d","us-east-1e","ca-central-1d"]
# Can be used to add more inputs. This string should be formatted according to
# Fluent Bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-
- # ode/configuration-file#config_input).
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file#config_input).
fluent_bit_additional_inputs = ""
# Additional filters that fluent-bit should apply to log output. This string
# should be formatted according to the Fluent-bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configur
- # tion-file#config_filter).
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_filter).
fluent_bit_extra_filters = ""
# Additional output streams that fluent-bit should export logs to. This string
# should be formatted according to the Fluent-bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configur
- # tion-file#config_output).
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_output).
fluent_bit_extra_outputs = ""
# Can be used to add additional log parsers. This string should be formatted
@@ -803,62 +820,64 @@ inputs = {
# fluent-bit.conf file.
fluent_bit_extra_parsers = ""
- # The Container repository to use for looking up the aws-for-fluent-bit Container
- # image when deploying the pods. When null, uses the default repository set in the
- # chart. Only applies to non-fargate workers.
+ # The Container repository to use for looking up the aws-for-fluent-bit
+ # Container image when deploying the pods. When null, uses the default
+ # repository set in the chart. Only applies to non-fargate workers.
fluent_bit_image_repository = null
- # If set to true, that means that the CloudWatch Log Group fluent-bit should use
- # for streaming logs already exists and does not need to be created.
+ # If set to true, that means that the CloudWatch Log Group fluent-bit should
+ # use for streaming logs already exists and does not need to be created.
fluent_bit_log_group_already_exists = false
- # The ARN of the KMS key to use to encrypt the logs in the CloudWatch Log Group
- # used for storing container logs streamed with FluentBit. Set to null to disable
- # encryption.
+ # The ARN of the KMS key to use to encrypt the logs in the CloudWatch Log
+ # Group used for storing container logs streamed with FluentBit. Set to null
+ # to disable encryption.
fluent_bit_log_group_kms_key_id = null
- # Name of the CloudWatch Log Group fluent-bit should use to stream logs to. When
- # null (default), uses the eks_cluster_name as the Log Group name.
+ # Name of the CloudWatch Log Group fluent-bit should use to stream logs to.
+ # When null (default), uses the eks_cluster_name as the Log Group name.
fluent_bit_log_group_name = null
- # number of days to retain log events. Possible values are: 1, 3, 5, 7, 14, 30,
- # 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. Select 0 to never
- # expire.
+ # Number of days to retain log events. Possible values are: 1, 3, 5, 7, 14,
+ # 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. Select 0
+ # to never expire.
fluent_bit_log_group_retention = 0
- # ARN of the lambda function to trigger when events arrive at the fluent bit log
- # group.
+ # ARN of the lambda function to trigger when events arrive at the fluent bit
+ # log group.
fluent_bit_log_group_subscription_arn = null
# Filter pattern for the CloudWatch subscription. Only used if
# var.fluent_bit_log_group_subscription_arn is set.
fluent_bit_log_group_subscription_filter = ""
- # Prefix string to use for the CloudWatch Log Stream that gets created for each
- # pod. When null (default), the prefix is set to 'fluentbit'.
+ # Prefix string to use for the CloudWatch Log Stream that gets created for
+ # each pod. When null (default), the prefix is set to 'fluentbit'.
fluent_bit_log_stream_prefix = null
# Configure affinity rules for the fluent-bit Pods to control which nodes to
# schedule on. Each item in the list should be a map with the keys `key`,
- # `values`, and `operator`, corresponding to the 3 properties of matchExpressions.
- # Note that all expressions must be satisfied to schedule on the node.
+ # `values`, and `operator`, corresponding to the 3 properties of
+ # matchExpressions. Note that all expressions must be satisfied to schedule on
+ # the node.
fluent_bit_pod_node_affinity = []
- # Configure tolerations rules to allow the fluent-bit Pods to schedule on nodes
- # that have been tainted. Each item in the list specifies a toleration rule.
+ # Configure toleration rules to allow the fluent-bit Pods to schedule on
+ # nodes that have been tainted. Each item in the list specifies a toleration
+ # rule.
fluent_bit_pod_tolerations = []
- # Optionally use a cri parser instead of the default Docker parser. This should be
- # used for EKS v1.24 and later.
+ # Optionally use a cri parser instead of the default Docker parser. This
+ # should be used for EKS v1.24 and later.
fluent_bit_use_cri_parser_conf = true
# Which version of aws-for-fluent-bit to install. When null, uses the default
# version set in the chart. Only applies to non-fargate workers.
fluent_bit_version = null
- # A map of PriorityClass configurations, with the key as the PriorityClass name.
- # https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/
- # priorityclass
+ # A map of PriorityClass configurations, with the key as the PriorityClass
+ # name.
+ # https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
kubernetes_priority_classes = {}
 # Policy for how DNS records are synchronized between sources and providers
@@ -870,42 +889,43 @@ inputs = {
# When true, the cluster autoscaler pods will be scheduled on Fargate. It is
# recommended to run the cluster autoscaler on Fargate to avoid the autoscaler
- # scaling down a node where it is running (and thus shutting itself down during a
- # scale down event). However, since Fargate is only supported on a handful of
- # regions, we don't default to true here.
+ # scaling down a node where it is running (and thus shutting itself down
+ # during a scale down event). However, since Fargate is only supported on a
+ # handful of regions, we don't default to true here.
schedule_cluster_autoscaler_on_fargate = false
# When true, the external-dns pods will be scheduled on Fargate.
schedule_external_dns_on_fargate = false
- # Configure Kubernetes Services to lookup external DNS records. This can be useful
- # to bind friendly internal service names to domains (e.g. the RDS database
- # endpoint).
+ # Configure Kubernetes Services to lookup external DNS records. This can be
+ # useful to bind friendly internal service names to domains (e.g. the RDS
+ # database endpoint).
service_dns_mappings = {}
- # If this variable is set to true, then use an exec-based plugin to authenticate
- # and fetch tokens for EKS. This is useful because EKS clusters use short-lived
- # authentication tokens that can expire in the middle of an 'apply' or 'destroy',
- # and since the native Kubernetes provider in Terraform doesn't have a way to
- # fetch up-to-date tokens, we recommend using an exec-based provider as a
- # workaround. Use the use_kubergrunt_to_fetch_token input variable to control
- # whether kubergrunt or aws is used to fetch tokens.
+ # If this variable is set to true, then use an exec-based plugin to
+ # authenticate and fetch tokens for EKS. This is useful because EKS clusters
+ # use short-lived authentication tokens that can expire in the middle of an
+ # 'apply' or 'destroy', and since the native Kubernetes provider in Terraform
+ # doesn't have a way to fetch up-to-date tokens, we recommend using an
+ # exec-based provider as a workaround. Use the use_kubergrunt_to_fetch_token
+ # input variable to control whether kubergrunt or aws is used to fetch tokens.
use_exec_plugin_for_auth = true
- # EKS clusters use short-lived authentication tokens that can expire in the middle
- # of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based plugin to
- # fetch an up-to-date token. If this variable is set to true, we'll use kubergrunt
- # to fetch the token (in which case, kubergrunt must be installed and on PATH); if
- # this variable is set to false, we'll use the aws CLI to fetch the token (in
- # which case, aws must be installed and on PATH). Note this functionality is only
- # enabled if use_exec_plugin_for_auth is set to true.
+ # EKS clusters use short-lived authentication tokens that can expire in the
+ # middle of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based
+ # plugin to fetch an up-to-date token. If this variable is set to true, we'll
+ # use kubergrunt to fetch the token (in which case, kubergrunt must be
+ # installed and on PATH); if this variable is set to false, we'll use the aws
+ # CLI to fetch the token (in which case, aws must be installed and on PATH).
+ # Note this functionality is only enabled if use_exec_plugin_for_auth is set
+ # to true.
use_kubergrunt_to_fetch_token = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
}
@@ -2263,11 +2283,11 @@ A list of names of Kubernetes PriorityClass objects created by this module.
diff --git a/docs/reference/services/app-orchestration/amazon-eks-workers.md b/docs/reference/services/app-orchestration/amazon-eks-workers.md
index 556ff07ce3..2658f306c5 100644
--- a/docs/reference/services/app-orchestration/amazon-eks-workers.md
+++ b/docs/reference/services/app-orchestration/amazon-eks-workers.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon EKS Workers
-View Source
+View Source
Release Notes
@@ -68,9 +68,9 @@ more, see the documentation in the [terraform-aws-eks](https://github.com/gruntw
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -78,7 +78,7 @@ more, see the documentation in the [terraform-aws-eks](https://github.com/gruntw
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -86,7 +86,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -97,10 +97,10 @@ If you want to deploy this repo in production, check out the following resources
## Manage
For information on registering the worker IAM role to the EKS control plane, refer to the
-[IAM Roles and Kubernetes API Access](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/services/eks-workers/core-concepts.md#iam-roles-and-kubernetes-api-access) section of the documentation.
+[IAM Roles and Kubernetes API Access](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/services/eks-workers/core-concepts.md#iam-roles-and-kubernetes-api-access) section of the documentation.
For information on how to perform a blue-green deployment of the worker pools, refer to the
-[How do I perform a blue green release to roll out new versions of the module](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/services/eks-workers/core-concepts.md#how-do-i-perform-a-blue-green-release-to-roll-out-new-versions-of-the-module)
+[How do I perform a blue green release to roll out new versions of the module](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/services/eks-workers/core-concepts.md#how-do-i-perform-a-blue-green-release-to-roll-out-new-versions-of-the-module)
section of the documentation.
For information on how to manage your EKS cluster, including how to deploy Pods on Fargate, how to associate IAM roles
@@ -121,20 +121,20 @@ to Pod, how to upgrade your EKS cluster, and more, see the documentation in the
module "eks_workers" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-workers?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-workers?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Configure one or more self-managed Auto Scaling Groups (ASGs) to manage the EC2
- # instances in this cluster. Set to empty object ({}) if you do not wish to
- # configure self-managed ASGs.
+ # Configure one or more self-managed Auto Scaling Groups (ASGs) to manage the
+ # EC2 instances in this cluster. Set to empty object ({}) if you do not wish
+ # to configure self-managed ASGs.
autoscaling_group_configurations =
- # The AMI to run on each instance in the EKS cluster. You can build the AMI using
- # the Packer template eks-node-al2.json. One of var.cluster_instance_ami or
- # var.cluster_instance_ami_filters is required. Only used if
+ # The AMI to run on each instance in the EKS cluster. You can build the AMI
+ # using the Packer template eks-node-al2.json. One of var.cluster_instance_ami
+ # or var.cluster_instance_ami_filters is required. Only used if
# var.cluster_instance_ami_filters is null. Set to null if
# cluster_instance_ami_filters is set.
cluster_instance_ami =
@@ -156,8 +156,9 @@ module "eks_workers" {
# The name of the EKS cluster. The cluster must exist/already be deployed.
eks_cluster_name =
- # Configure one or more Node Groups to manage the EC2 instances in this cluster.
- # Set to empty object ({}) if you do not wish to configure managed node groups.
+ # Configure one or more Node Groups to manage the EC2 instances in this
+ # cluster. Set to empty object ({}) if you do not wish to configure managed
+ # node groups.
managed_node_group_configurations =
# ----------------------------------------------------------------------------------------------------
@@ -167,20 +168,21 @@ module "eks_workers" {
# A list of additional security group IDs to be attached on worker groups.
additional_security_groups_for_workers = []
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arn = []
# The list of CIDR blocks to allow inbound SSH access to the worker groups.
allow_inbound_ssh_from_cidr_blocks = []
- # The list of security group IDs to allow inbound SSH access to the worker groups.
+ # The list of security group IDs to allow inbound SSH access to the worker
+ # groups.
allow_inbound_ssh_from_security_groups = []
- # Custom name for the IAM role for the Self-managed workers. When null, a default
- # name based on worker_name_prefix will be used. One of asg_custom_iam_role_name
- # and asg_iam_role_arn is required (must be non-null) if
- # asg_iam_role_already_exists is true.
+ # Custom name for the IAM role for the Self-managed workers. When null, a
+ # default name based on worker_name_prefix will be used. One of
+ # asg_custom_iam_role_name and asg_iam_role_arn is required (must be non-null)
+ # if asg_iam_role_already_exists is true.
asg_custom_iam_role_name = null
# Default value for enable_detailed_monitoring field of
@@ -226,12 +228,12 @@ module "eks_workers" {
# max_pods_allowed will use this value.
asg_default_max_pods_allowed = null
- # Default value for the max_size field of autoscaling_group_configurations. Any
- # map entry that does not specify max_size will use this value.
+ # Default value for the max_size field of autoscaling_group_configurations.
+ # Any map entry that does not specify max_size will use this value.
asg_default_max_size = 2
- # Default value for the min_size field of autoscaling_group_configurations. Any
- # map entry that does not specify min_size will use this value.
+ # Default value for the min_size field of autoscaling_group_configurations.
+ # Any map entry that does not specify min_size will use this value.
asg_default_min_size = 1
# Default value for the multi_instance_overrides field of
@@ -264,13 +266,14 @@ module "eks_workers" {
# spot_instance_pools will use this value.
asg_default_spot_instance_pools = null
- # Default value for the spot_max_price field of autoscaling_group_configurations.
- # Any map entry that does not specify spot_max_price will use this value. Set to
- # empty string (default) to mean on-demand price.
+ # Default value for the spot_max_price field of
+ # autoscaling_group_configurations. Any map entry that does not specify
+ # spot_max_price will use this value. Set to empty string (default) to mean
+ # on-demand price.
asg_default_spot_max_price = null
- # Default value for the tags field of autoscaling_group_configurations. Any map
- # entry that does not specify tags will use this value.
+ # Default value for the tags field of autoscaling_group_configurations. Any
+ # map entry that does not specify tags will use this value.
asg_default_tags = []
# Default value for the use_multi_instances_policy field of
@@ -283,65 +286,67 @@ module "eks_workers" {
# true, this will be used as a name prefix.
asg_iam_instance_profile_name = null
- # Whether or not the IAM role used for the Self-managed workers already exists.
- # When false, this module will create a new IAM role.
+ # Whether or not the IAM role used for the Self-managed workers already
+ # exists. When false, this module will create a new IAM role.
asg_iam_role_already_exists = false
- # ARN of the IAM role to use if iam_role_already_exists = true. When null, uses
- # asg_custom_iam_role_name to lookup the ARN. One of asg_custom_iam_role_name and
- # asg_iam_role_arn is required (must be non-null) if asg_iam_role_already_exists
- # is true.
+ # ARN of the IAM role to use if iam_role_already_exists = true. When null,
+ # uses asg_custom_iam_role_name to lookup the ARN. One of
+ # asg_custom_iam_role_name and asg_iam_role_arn is required (must be non-null)
+ # if asg_iam_role_already_exists is true.
asg_iam_role_arn = null
# A map of tags to apply to the Security Group of the ASG for the self managed
# worker pool. The key is the tag name and the value is the tag value.
asg_security_group_tags = {}
- # When true, all the relevant resources for self managed workers will be set to
- # use the name_prefix attribute so that unique names are generated for them. This
- # allows those resources to support recreation through create_before_destroy
- # lifecycle rules. Set to false if you were using any version before 0.65.0 and
- # wish to avoid recreating the entire worker pool on your cluster.
+ # When true, all the relevant resources for self managed workers will be set
+ # to use the name_prefix attribute so that unique names are generated for
+ # them. This allows those resources to support recreation through
+ # create_before_destroy lifecycle rules. Set to false if you were using any
+ # version before 0.65.0 and wish to avoid recreating the entire worker pool on
+ # your cluster.
asg_use_resource_name_prefix = true
# Adds additional tags to each ASG that allow a cluster autoscaler to
# auto-discover them. Only used for self-managed workers.
autoscaling_group_include_autoscaler_discovery_tags = true
- # Namespace where the AWS Auth Merger is deployed. If configured, the worker IAM
- # role will be mapped to the Kubernetes RBAC group for Nodes using a ConfigMap in
- # the auth merger namespace.
+ # Namespace where the AWS Auth Merger is deployed. If configured, the worker
+ # IAM role will be mapped to the Kubernetes RBAC group for Nodes using a
+ # ConfigMap in the auth merger namespace.
aws_auth_merger_namespace = null
- # Cloud init scripts to run on the EKS worker nodes when it is booting. See the
- # part blocks in
+ # Cloud init scripts to run on the EKS worker nodes when it is booting. See
+ # the part blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
- # syntax. To override the default boot script installed as part of the module, use
- # the key `default`.
+ # syntax. To override the default boot script installed as part of the module,
+ # use the key `default`.
cloud_init_parts = {}
# The ID (ARN, alias ARN, AWS ID) of a customer managed KMS Key to use for
- # encrypting log data. Only used if var.enable_cloudwatch_log_aggregation is true.
+ # encrypting log data. Only used if var.enable_cloudwatch_log_aggregation is
+ # true.
cloudwatch_log_group_kms_key_id = null
- # Name of the CloudWatch Log Group where server system logs are reported to. Only
- # used if var.enable_cloudwatch_log_aggregation is true.
+ # Name of the CloudWatch Log Group where server system logs are reported to.
+ # Only used if var.enable_cloudwatch_log_aggregation is true.
cloudwatch_log_group_name = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever. Only used if var.enable_cloudwatch_log_aggregation
- # is true.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
+ # Only used if var.enable_cloudwatch_log_aggregation is true.
cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values. Only used if
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values. Only used if
# var.enable_cloudwatch_log_aggregation is true.
cloudwatch_log_group_tags = null
# Whether or not to associate a public IP address to the instances of the self
- # managed ASGs. Will only work if the instances are launched in a public subnet.
+ # managed ASGs. Will only work if the instances are launched in a public
+ # subnet.
cluster_instance_associate_public_ip_address = false
# The name of the Key Pair that can be used to SSH to each instance in the EKS
@@ -360,56 +365,54 @@ module "eks_workers" {
# dashboard.
dashboard_cpu_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the worker disk usage widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the worker disk usage widget to output for use in a
+ # CloudWatch dashboard.
dashboard_disk_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the worker memory usage widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the worker memory usage widget to output for use in a
+ # CloudWatch dashboard.
dashboard_memory_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Set to true to send logs to CloudWatch. This is useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch. Note
- # that this is only recommended for aggregating system level logs from the server
- # instances. Container logs should be managed through fluent-bit deployed with
- # eks-core-services.
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch. Note that this is only recommended for
+ # aggregating system level logs from the server instances. Container logs
+ # should be managed through fluent-bit deployed with eks-core-services.
enable_cloudwatch_log_aggregation = false
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your
- # Bastion host.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your Bastion host.
enable_cloudwatch_metrics = true
# Enable fail2ban to block brute force log in attempts. Defaults to true.
enable_fail2ban = true
- # If you are using ssh-grunt and your IAM users / groups are defined in a separate
- # AWS account, you can use this variable to specify the ARN of an IAM role that
- # ssh-grunt can assume to retrieve IAM group and public SSH key info from that
- # account. To omit this variable, set it to an empty string (do NOT use null, or
- # Terraform will complain).
+ # If you are using ssh-grunt and your IAM users / groups are defined in a
+ # separate AWS account, you can use this variable to specify the ARN of an IAM
+ # role that ssh-grunt can assume to retrieve IAM group and public SSH key info
+ # from that account. To omit this variable, set it to an empty string (do NOT
+ # use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_worker_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_worker_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -420,42 +423,42 @@ module "eks_workers" {
# percentage above this threshold.
high_worker_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_worker_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_worker_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_memory_utilization_treat_missing_data = "missing"
- # Custom name for the IAM role for the Managed Node Groups. When null, a default
- # name based on worker_name_prefix will be used. One of
- # managed_node_group_custom_iam_role_name and managed_node_group_iam_role_arn is
- # required (must be non-null) if managed_node_group_iam_role_already_exists is
- # true.
+ # Custom name for the IAM role for the Managed Node Groups. When null, a
+ # default name based on worker_name_prefix will be used. One of
+ # managed_node_group_custom_iam_role_name and managed_node_group_iam_role_arn
+ # is required (must be non-null) if managed_node_group_iam_role_already_exists
+ # is true.
managed_node_group_custom_iam_role_name = null
# Whether or not the IAM role used for the Managed Node Group workers already
# exists. When false, this module will create a new IAM role.
managed_node_group_iam_role_already_exists = false
- # ARN of the IAM role to use if iam_role_already_exists = true. When null, uses
- # managed_node_group_custom_iam_role_name to lookup the ARN. One of
- # managed_node_group_custom_iam_role_name and managed_node_group_iam_role_arn is
- # required (must be non-null) if managed_node_group_iam_role_already_exists is
- # true.
+ # ARN of the IAM role to use if iam_role_already_exists = true. When null,
+ # uses managed_node_group_custom_iam_role_name to lookup the ARN. One of
+ # managed_node_group_custom_iam_role_name and managed_node_group_iam_role_arn
+ # is required (must be non-null) if managed_node_group_iam_role_already_exists
+ # is true.
managed_node_group_iam_role_arn = null
# Default value for capacity_type field of managed_node_group_configurations.
@@ -493,8 +496,8 @@ module "eks_workers" {
node_group_default_instance_types = null
# Default value for labels field of managed_node_group_configurations. Unlike
- # common_labels which will always be merged in, these labels are only used if the
- # labels field is omitted from the configuration.
+ # common_labels which will always be merged in, these labels are only used if
+ # the labels field is omitted from the configuration.
node_group_default_labels = {}
# Default value for the max_pods_allowed field of
@@ -512,101 +515,104 @@ module "eks_workers" {
node_group_default_subnet_ids = null
# Default value for tags field of managed_node_group_configurations. Unlike
- # common_tags which will always be merged in, these tags are only used if the tags
- # field is omitted from the configuration.
+ # common_tags which will always be merged in, these tags are only used if the
+ # tags field is omitted from the configuration.
node_group_default_tags = {}
# Default value for taint field of node_group_configurations. These taints are
# only used if the taint field is omitted from the configuration.
node_group_default_taints = []
- # The instance type to configure in the launch template. This value will be used
- # when the instance_types field is set to null (NOT omitted, in which case
- # var.node_group_default_instance_types will be used).
+ # The instance type to configure in the launch template. This value will be
+ # used when the instance_types field is set to null (NOT omitted, in which
+ # case var.node_group_default_instance_types will be used).
node_group_launch_template_instance_type = null
- # Tags assigned to a node group are mirrored to the underlaying autoscaling group
- # by default. If you want to disable this behaviour, set this flag to false. Note
- # that this assumes that there is a one-to-one mappping between ASGs and Node
- # Groups. If there is more than one ASG mapped to the Node Group, then this will
- # only apply the tags on the first one. Due to a limitation in Terraform for_each
- # where it can not depend on dynamic data, it is currently not possible in the
- # module to map the tags to all ASGs. If you wish to apply the tags to all
- # underlying ASGs, then it is recommended to call the aws_autoscaling_group_tag
- # resource in a separate terraform state file outside of this module, or use a
- # two-stage apply process.
+ # Tags assigned to a node group are mirrored to the underlaying autoscaling
+ # group by default. If you want to disable this behaviour, set this flag to
+ # false. Note that this assumes that there is a one-to-one mappping between
+ # ASGs and Node Groups. If there is more than one ASG mapped to the Node
+ # Group, then this will only apply the tags on the first one. Due to a
+ # limitation in Terraform for_each where it can not depend on dynamic data, it
+ # is currently not possible in the module to map the tags to all ASGs. If you
+ # wish to apply the tags to all underlying ASGs, then it is recommended to
+ # call the aws_autoscaling_group_tag resource in a separate terraform state
+ # file outside of this module, or use a two-stage apply process.
node_group_mirror_tags_to_asg = true
- # The names of the node groups. When null, this value is automatically calculated
- # from the managed_node_group_configurations map. This variable must be set if any
- # of the values of the managed_node_group_configurations map depends on a resource
- # that is not available at plan time to work around terraform limitations with
- # for_each.
+ # The names of the node groups. When null, this value is automatically
+ # calculated from the managed_node_group_configurations map. This variable
+ # must be set if any of the values of the managed_node_group_configurations
+ # map depends on a resource that is not available at plan time to work around
+ # terraform limitations with for_each.
node_group_names = null
# A map of tags to apply to the Security Group of the ASG for the managed node
# group pool. The key is the tag name and the value is the tag value.
node_group_security_group_tags = {}
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the EKS workers. To omit this variable, set it to an
- # empty string (do NOT use null, or Terraform will complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the EKS workers. To omit this variable, set
+ # it to an empty string (do NOT use null, or Terraform will complain).
ssh_grunt_iam_group = "ssh-grunt-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the EKS workers with sudo permissions. To omit this
- # variable, set it to an empty string (do NOT use null, or Terraform will
- # complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the EKS workers with sudo permissions. To
+ # omit this variable, set it to an empty string (do NOT use null, or Terraform
+ # will complain).
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
# The tenancy of the servers in the self-managed worker ASG. Must be one of:
# default, dedicated, or host.
tenancy = "default"
- # If this variable is set to true, then use an exec-based plugin to authenticate
- # and fetch tokens for EKS. This is useful because EKS clusters use short-lived
- # authentication tokens that can expire in the middle of an 'apply' or 'destroy',
- # and since the native Kubernetes provider in Terraform doesn't have a way to
- # fetch up-to-date tokens, we recommend using an exec-based provider as a
- # workaround. Use the use_kubergrunt_to_fetch_token input variable to control
- # whether kubergrunt or aws is used to fetch tokens.
+ # If this variable is set to true, then use an exec-based plugin to
+ # authenticate and fetch tokens for EKS. This is useful because EKS clusters
+ # use short-lived authentication tokens that can expire in the middle of an
+ # 'apply' or 'destroy', and since the native Kubernetes provider in Terraform
+ # doesn't have a way to fetch up-to-date tokens, we recommend using an
+ # exec-based provider as a workaround. Use the use_kubergrunt_to_fetch_token
+ # input variable to control whether kubergrunt or aws is used to fetch tokens.
use_exec_plugin_for_auth = true
- # Set this variable to true to enable the use of Instance Metadata Service Version
- # 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due
- # to its special security hardening, we allow this in order to support the use
- # case of AMIs built outside of these modules that depend on IMDSv1.
+ # Set this variable to true to enable the use of Instance Metadata Service
+ # Version 1 in this module's aws_launch_template. Note that while IMDsv2 is
+ # preferred due to its special security hardening, we allow this in order to
+ # support the use case of AMIs built outside of these modules that depend on
+ # IMDSv1.
use_imdsv1 = false
- # EKS clusters use short-lived authentication tokens that can expire in the middle
- # of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based plugin to
- # fetch an up-to-date token. If this variable is set to true, we'll use kubergrunt
- # to fetch the token (in which case, kubergrunt must be installed and on PATH); if
- # this variable is set to false, we'll use the aws CLI to fetch the token (in
- # which case, aws must be installed and on PATH). Note this functionality is only
- # enabled if use_exec_plugin_for_auth is set to true.
+ # EKS clusters use short-lived authentication tokens that can expire in the
+ # middle of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based
+ # plugin to fetch an up-to-date token. If this variable is set to true, we'll
+ # use kubergrunt to fetch the token (in which case, kubergrunt must be
+ # installed and on PATH); if this variable is set to false, we'll use the aws
+ # CLI to fetch the token (in which case, aws must be installed and on PATH).
+ # Note this functionality is only enabled if use_exec_plugin_for_auth is set
+ # to true.
use_kubergrunt_to_fetch_token = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# When true, assumes prefix delegation mode is in use for the AWS VPC CNI
# component of the EKS cluster when computing max pods allowed on the node. In
- # prefix delegation mode, each ENI will be allocated 16 IP addresses (/28) instead
- # of 1, allowing you to pack more Pods per node.
+ # prefix delegation mode, each ENI will be allocated 16 IP addresses (/28)
+ # instead of 1, allowing you to pack more Pods per node.
use_prefix_mode_to_calculate_max_pods = false
- # Name of the IAM role to Kubernetes RBAC group mapping ConfigMap. Only used if
- # aws_auth_merger_namespace is not null.
+ # Name of the IAM role to Kubernetes RBAC group mapping ConfigMap. Only used
+ # if aws_auth_merger_namespace is not null.
worker_k8s_role_mapping_name = "eks-cluster-worker-iam-mapping"
- # Prefix EKS worker resource names with this string. When you have multiple worker
- # groups for the cluster, you can use this to namespace the resources. Defaults to
- # empty string so that resource names are not excessively long by default.
+ # Prefix EKS worker resource names with this string. When you have multiple
+ # worker groups for the cluster, you can use this to namespace the resources.
+ # Defaults to empty string so that resource names are not excessively long by
+ # default.
worker_name_prefix = ""
}
@@ -624,7 +630,7 @@ module "eks_workers" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-workers?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-workers?ref=v0.104.12"
}
inputs = {
@@ -633,14 +639,14 @@ inputs = {
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Configure one or more self-managed Auto Scaling Groups (ASGs) to manage the EC2
- # instances in this cluster. Set to empty object ({}) if you do not wish to
- # configure self-managed ASGs.
+ # Configure one or more self-managed Auto Scaling Groups (ASGs) to manage the
+ # EC2 instances in this cluster. Set to empty object ({}) if you do not wish
+ # to configure self-managed ASGs.
autoscaling_group_configurations =
- # The AMI to run on each instance in the EKS cluster. You can build the AMI using
- # the Packer template eks-node-al2.json. One of var.cluster_instance_ami or
- # var.cluster_instance_ami_filters is required. Only used if
+ # The AMI to run on each instance in the EKS cluster. You can build the AMI
+ # using the Packer template eks-node-al2.json. One of var.cluster_instance_ami
+ # or var.cluster_instance_ami_filters is required. Only used if
# var.cluster_instance_ami_filters is null. Set to null if
# cluster_instance_ami_filters is set.
cluster_instance_ami =
@@ -662,8 +668,9 @@ inputs = {
# The name of the EKS cluster. The cluster must exist/already be deployed.
eks_cluster_name =
- # Configure one or more Node Groups to manage the EC2 instances in this cluster.
- # Set to empty object ({}) if you do not wish to configure managed node groups.
+ # Configure one or more Node Groups to manage the EC2 instances in this
+ # cluster. Set to empty object ({}) if you do not wish to configure managed
+ # node groups.
managed_node_group_configurations =
# ----------------------------------------------------------------------------------------------------
@@ -673,20 +680,21 @@ inputs = {
# A list of additional security group IDs to be attached on worker groups.
additional_security_groups_for_workers = []
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arn = []
# The list of CIDR blocks to allow inbound SSH access to the worker groups.
allow_inbound_ssh_from_cidr_blocks = []
- # The list of security group IDs to allow inbound SSH access to the worker groups.
+ # The list of security group IDs to allow inbound SSH access to the worker
+ # groups.
allow_inbound_ssh_from_security_groups = []
- # Custom name for the IAM role for the Self-managed workers. When null, a default
- # name based on worker_name_prefix will be used. One of asg_custom_iam_role_name
- # and asg_iam_role_arn is required (must be non-null) if
- # asg_iam_role_already_exists is true.
+ # Custom name for the IAM role for the Self-managed workers. When null, a
+ # default name based on worker_name_prefix will be used. One of
+ # asg_custom_iam_role_name and asg_iam_role_arn is required (must be non-null)
+ # if asg_iam_role_already_exists is true.
asg_custom_iam_role_name = null
# Default value for enable_detailed_monitoring field of
@@ -732,12 +740,12 @@ inputs = {
# max_pods_allowed will use this value.
asg_default_max_pods_allowed = null
- # Default value for the max_size field of autoscaling_group_configurations. Any
- # map entry that does not specify max_size will use this value.
+ # Default value for the max_size field of autoscaling_group_configurations.
+ # Any map entry that does not specify max_size will use this value.
asg_default_max_size = 2
- # Default value for the min_size field of autoscaling_group_configurations. Any
- # map entry that does not specify min_size will use this value.
+ # Default value for the min_size field of autoscaling_group_configurations.
+ # Any map entry that does not specify min_size will use this value.
asg_default_min_size = 1
# Default value for the multi_instance_overrides field of
@@ -770,13 +778,14 @@ inputs = {
# spot_instance_pools will use this value.
asg_default_spot_instance_pools = null
- # Default value for the spot_max_price field of autoscaling_group_configurations.
- # Any map entry that does not specify spot_max_price will use this value. Set to
- # empty string (default) to mean on-demand price.
+ # Default value for the spot_max_price field of
+ # autoscaling_group_configurations. Any map entry that does not specify
+ # spot_max_price will use this value. Set to empty string (default) to mean
+ # on-demand price.
asg_default_spot_max_price = null
- # Default value for the tags field of autoscaling_group_configurations. Any map
- # entry that does not specify tags will use this value.
+ # Default value for the tags field of autoscaling_group_configurations. Any
+ # map entry that does not specify tags will use this value.
asg_default_tags = []
# Default value for the use_multi_instances_policy field of
@@ -789,65 +798,67 @@ inputs = {
# true, this will be used as a name prefix.
asg_iam_instance_profile_name = null
- # Whether or not the IAM role used for the Self-managed workers already exists.
- # When false, this module will create a new IAM role.
+ # Whether or not the IAM role used for the Self-managed workers already
+ # exists. When false, this module will create a new IAM role.
asg_iam_role_already_exists = false
- # ARN of the IAM role to use if iam_role_already_exists = true. When null, uses
- # asg_custom_iam_role_name to lookup the ARN. One of asg_custom_iam_role_name and
- # asg_iam_role_arn is required (must be non-null) if asg_iam_role_already_exists
- # is true.
+ # ARN of the IAM role to use if iam_role_already_exists = true. When null,
+ # uses asg_custom_iam_role_name to lookup the ARN. One of
+ # asg_custom_iam_role_name and asg_iam_role_arn is required (must be non-null)
+ # if asg_iam_role_already_exists is true.
asg_iam_role_arn = null
# A map of tags to apply to the Security Group of the ASG for the self managed
# worker pool. The key is the tag name and the value is the tag value.
asg_security_group_tags = {}
- # When true, all the relevant resources for self managed workers will be set to
- # use the name_prefix attribute so that unique names are generated for them. This
- # allows those resources to support recreation through create_before_destroy
- # lifecycle rules. Set to false if you were using any version before 0.65.0 and
- # wish to avoid recreating the entire worker pool on your cluster.
+ # When true, all the relevant resources for self managed workers will be set
+ # to use the name_prefix attribute so that unique names are generated for
+ # them. This allows those resources to support recreation through
+ # create_before_destroy lifecycle rules. Set to false if you were using any
+ # version before 0.65.0 and wish to avoid recreating the entire worker pool on
+ # your cluster.
asg_use_resource_name_prefix = true
# Adds additional tags to each ASG that allow a cluster autoscaler to
# auto-discover them. Only used for self-managed workers.
autoscaling_group_include_autoscaler_discovery_tags = true
- # Namespace where the AWS Auth Merger is deployed. If configured, the worker IAM
- # role will be mapped to the Kubernetes RBAC group for Nodes using a ConfigMap in
- # the auth merger namespace.
+ # Namespace where the AWS Auth Merger is deployed. If configured, the worker
+ # IAM role will be mapped to the Kubernetes RBAC group for Nodes using a
+ # ConfigMap in the auth merger namespace.
aws_auth_merger_namespace = null
- # Cloud init scripts to run on the EKS worker nodes when it is booting. See the
- # part blocks in
+ # Cloud init scripts to run on the EKS worker nodes when it is booting. See
+ # the part blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
- # syntax. To override the default boot script installed as part of the module, use
- # the key `default`.
+ # syntax. To override the default boot script installed as part of the module,
+ # use the key `default`.
cloud_init_parts = {}
# The ID (ARN, alias ARN, AWS ID) of a customer managed KMS Key to use for
- # encrypting log data. Only used if var.enable_cloudwatch_log_aggregation is true.
+ # encrypting log data. Only used if var.enable_cloudwatch_log_aggregation is
+ # true.
cloudwatch_log_group_kms_key_id = null
- # Name of the CloudWatch Log Group where server system logs are reported to. Only
- # used if var.enable_cloudwatch_log_aggregation is true.
+ # Name of the CloudWatch Log Group where server system logs are reported to.
+ # Only used if var.enable_cloudwatch_log_aggregation is true.
cloudwatch_log_group_name = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever. Only used if var.enable_cloudwatch_log_aggregation
- # is true.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
+ # Only used if var.enable_cloudwatch_log_aggregation is true.
cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values. Only used if
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values. Only used if
# var.enable_cloudwatch_log_aggregation is true.
cloudwatch_log_group_tags = null
# Whether or not to associate a public IP address to the instances of the self
- # managed ASGs. Will only work if the instances are launched in a public subnet.
+ # managed ASGs. Will only work if the instances are launched in a public
+ # subnet.
cluster_instance_associate_public_ip_address = false
# The name of the Key Pair that can be used to SSH to each instance in the EKS
@@ -866,56 +877,54 @@ inputs = {
# dashboard.
dashboard_cpu_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the worker disk usage widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the worker disk usage widget to output for use in a
+ # CloudWatch dashboard.
dashboard_disk_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the worker memory usage widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the worker memory usage widget to output for use in a
+ # CloudWatch dashboard.
dashboard_memory_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Set to true to send logs to CloudWatch. This is useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch. Note
- # that this is only recommended for aggregating system level logs from the server
- # instances. Container logs should be managed through fluent-bit deployed with
- # eks-core-services.
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch. Note that this is only recommended for
+ # aggregating system level logs from the server instances. Container logs
+ # should be managed through fluent-bit deployed with eks-core-services.
enable_cloudwatch_log_aggregation = false
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your
- # Bastion host.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your Bastion host.
enable_cloudwatch_metrics = true
# Enable fail2ban to block brute force log in attempts. Defaults to true.
enable_fail2ban = true
- # If you are using ssh-grunt and your IAM users / groups are defined in a separate
- # AWS account, you can use this variable to specify the ARN of an IAM role that
- # ssh-grunt can assume to retrieve IAM group and public SSH key info from that
- # account. To omit this variable, set it to an empty string (do NOT use null, or
- # Terraform will complain).
+ # If you are using ssh-grunt and your IAM users / groups are defined in a
+ # separate AWS account, you can use this variable to specify the ARN of an IAM
+ # role that ssh-grunt can assume to retrieve IAM group and public SSH key info
+ # from that account. To omit this variable, set it to an empty string (do NOT
+ # use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_worker_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_worker_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -926,42 +935,42 @@ inputs = {
# percentage above this threshold.
high_worker_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_worker_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_worker_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_memory_utilization_treat_missing_data = "missing"
- # Custom name for the IAM role for the Managed Node Groups. When null, a default
- # name based on worker_name_prefix will be used. One of
- # managed_node_group_custom_iam_role_name and managed_node_group_iam_role_arn is
- # required (must be non-null) if managed_node_group_iam_role_already_exists is
- # true.
+ # Custom name for the IAM role for the Managed Node Groups. When null, a
+ # default name based on worker_name_prefix will be used. One of
+ # managed_node_group_custom_iam_role_name and managed_node_group_iam_role_arn
+ # is required (must be non-null) if managed_node_group_iam_role_already_exists
+ # is true.
managed_node_group_custom_iam_role_name = null
# Whether or not the IAM role used for the Managed Node Group workers already
# exists. When false, this module will create a new IAM role.
managed_node_group_iam_role_already_exists = false
- # ARN of the IAM role to use if iam_role_already_exists = true. When null, uses
- # managed_node_group_custom_iam_role_name to lookup the ARN. One of
- # managed_node_group_custom_iam_role_name and managed_node_group_iam_role_arn is
- # required (must be non-null) if managed_node_group_iam_role_already_exists is
- # true.
+ # ARN of the IAM role to use if iam_role_already_exists = true. When null,
+ # uses managed_node_group_custom_iam_role_name to lookup the ARN. One of
+ # managed_node_group_custom_iam_role_name and managed_node_group_iam_role_arn
+ # is required (must be non-null) if managed_node_group_iam_role_already_exists
+ # is true.
managed_node_group_iam_role_arn = null
# Default value for capacity_type field of managed_node_group_configurations.
@@ -999,8 +1008,8 @@ inputs = {
node_group_default_instance_types = null
# Default value for labels field of managed_node_group_configurations. Unlike
- # common_labels which will always be merged in, these labels are only used if the
- # labels field is omitted from the configuration.
+ # common_labels which will always be merged in, these labels are only used if
+ # the labels field is omitted from the configuration.
node_group_default_labels = {}
# Default value for the max_pods_allowed field of
@@ -1018,101 +1027,104 @@ inputs = {
node_group_default_subnet_ids = null
# Default value for tags field of managed_node_group_configurations. Unlike
- # common_tags which will always be merged in, these tags are only used if the tags
- # field is omitted from the configuration.
+ # common_tags which will always be merged in, these tags are only used if the
+ # tags field is omitted from the configuration.
node_group_default_tags = {}
# Default value for taint field of node_group_configurations. These taints are
# only used if the taint field is omitted from the configuration.
node_group_default_taints = []
- # The instance type to configure in the launch template. This value will be used
- # when the instance_types field is set to null (NOT omitted, in which case
- # var.node_group_default_instance_types will be used).
+ # The instance type to configure in the launch template. This value will be
+ # used when the instance_types field is set to null (NOT omitted, in which
+ # case var.node_group_default_instance_types will be used).
node_group_launch_template_instance_type = null
- # Tags assigned to a node group are mirrored to the underlaying autoscaling group
- # by default. If you want to disable this behaviour, set this flag to false. Note
- # that this assumes that there is a one-to-one mappping between ASGs and Node
- # Groups. If there is more than one ASG mapped to the Node Group, then this will
- # only apply the tags on the first one. Due to a limitation in Terraform for_each
- # where it can not depend on dynamic data, it is currently not possible in the
- # module to map the tags to all ASGs. If you wish to apply the tags to all
- # underlying ASGs, then it is recommended to call the aws_autoscaling_group_tag
- # resource in a separate terraform state file outside of this module, or use a
- # two-stage apply process.
+ # Tags assigned to a node group are mirrored to the underlying autoscaling
+ # group by default. If you want to disable this behaviour, set this flag to
+ # false. Note that this assumes that there is a one-to-one mapping between
+ # ASGs and Node Groups. If there is more than one ASG mapped to the Node
+ # Group, then this will only apply the tags on the first one. Due to a
+ # limitation in Terraform for_each where it can not depend on dynamic data, it
+ # is currently not possible in the module to map the tags to all ASGs. If you
+ # wish to apply the tags to all underlying ASGs, then it is recommended to
+ # call the aws_autoscaling_group_tag resource in a separate terraform state
+ # file outside of this module, or use a two-stage apply process.
node_group_mirror_tags_to_asg = true
- # The names of the node groups. When null, this value is automatically calculated
- # from the managed_node_group_configurations map. This variable must be set if any
- # of the values of the managed_node_group_configurations map depends on a resource
- # that is not available at plan time to work around terraform limitations with
- # for_each.
+ # The names of the node groups. When null, this value is automatically
+ # calculated from the managed_node_group_configurations map. This variable
+ # must be set if any of the values of the managed_node_group_configurations
+ # map depends on a resource that is not available at plan time to work around
+ # terraform limitations with for_each.
node_group_names = null
# A map of tags to apply to the Security Group of the ASG for the managed node
# group pool. The key is the tag name and the value is the tag value.
node_group_security_group_tags = {}
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the EKS workers. To omit this variable, set it to an
- # empty string (do NOT use null, or Terraform will complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the EKS workers. To omit this variable, set
+ # it to an empty string (do NOT use null, or Terraform will complain).
ssh_grunt_iam_group = "ssh-grunt-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the EKS workers with sudo permissions. To omit this
- # variable, set it to an empty string (do NOT use null, or Terraform will
- # complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the EKS workers with sudo permissions. To
+ # omit this variable, set it to an empty string (do NOT use null, or Terraform
+ # will complain).
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
# The tenancy of the servers in the self-managed worker ASG. Must be one of:
# default, dedicated, or host.
tenancy = "default"
- # If this variable is set to true, then use an exec-based plugin to authenticate
- # and fetch tokens for EKS. This is useful because EKS clusters use short-lived
- # authentication tokens that can expire in the middle of an 'apply' or 'destroy',
- # and since the native Kubernetes provider in Terraform doesn't have a way to
- # fetch up-to-date tokens, we recommend using an exec-based provider as a
- # workaround. Use the use_kubergrunt_to_fetch_token input variable to control
- # whether kubergrunt or aws is used to fetch tokens.
+ # If this variable is set to true, then use an exec-based plugin to
+ # authenticate and fetch tokens for EKS. This is useful because EKS clusters
+ # use short-lived authentication tokens that can expire in the middle of an
+ # 'apply' or 'destroy', and since the native Kubernetes provider in Terraform
+ # doesn't have a way to fetch up-to-date tokens, we recommend using an
+ # exec-based provider as a workaround. Use the use_kubergrunt_to_fetch_token
+ # input variable to control whether kubergrunt or aws is used to fetch tokens.
use_exec_plugin_for_auth = true
- # Set this variable to true to enable the use of Instance Metadata Service Version
- # 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due
- # to its special security hardening, we allow this in order to support the use
- # case of AMIs built outside of these modules that depend on IMDSv1.
+ # Set this variable to true to enable the use of Instance Metadata Service
+ # Version 1 in this module's aws_launch_template. Note that while IMDSv2 is
+ # preferred due to its special security hardening, we allow this in order to
+ # support the use case of AMIs built outside of these modules that depend on
+ # IMDSv1.
use_imdsv1 = false
- # EKS clusters use short-lived authentication tokens that can expire in the middle
- # of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based plugin to
- # fetch an up-to-date token. If this variable is set to true, we'll use kubergrunt
- # to fetch the token (in which case, kubergrunt must be installed and on PATH); if
- # this variable is set to false, we'll use the aws CLI to fetch the token (in
- # which case, aws must be installed and on PATH). Note this functionality is only
- # enabled if use_exec_plugin_for_auth is set to true.
+ # EKS clusters use short-lived authentication tokens that can expire in the
+ # middle of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based
+ # plugin to fetch an up-to-date token. If this variable is set to true, we'll
+ # use kubergrunt to fetch the token (in which case, kubergrunt must be
+ # installed and on PATH); if this variable is set to false, we'll use the aws
+ # CLI to fetch the token (in which case, aws must be installed and on PATH).
+ # Note this functionality is only enabled if use_exec_plugin_for_auth is set
+ # to true.
use_kubergrunt_to_fetch_token = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# When true, assumes prefix delegation mode is in use for the AWS VPC CNI
# component of the EKS cluster when computing max pods allowed on the node. In
- # prefix delegation mode, each ENI will be allocated 16 IP addresses (/28) instead
- # of 1, allowing you to pack more Pods per node.
+ # prefix delegation mode, each ENI will be allocated 16 IP addresses (/28)
+ # instead of 1, allowing you to pack more Pods per node.
use_prefix_mode_to_calculate_max_pods = false
- # Name of the IAM role to Kubernetes RBAC group mapping ConfigMap. Only used if
- # aws_auth_merger_namespace is not null.
+ # Name of the IAM role to Kubernetes RBAC group mapping ConfigMap. Only used
+ # if aws_auth_merger_namespace is not null.
worker_k8s_role_mapping_name = "eks-cluster-worker-iam-mapping"
- # Prefix EKS worker resource names with this string. When you have multiple worker
- # groups for the cluster, you can use this to namespace the resources. Defaults to
- # empty string so that resource names are not excessively long by default.
+ # Prefix EKS worker resource names with this string. When you have multiple
+ # worker groups for the cluster, you can use this to namespace the resources.
+ # Defaults to empty string so that resource names are not excessively long by
+ # default.
worker_name_prefix = ""
}
@@ -2647,11 +2659,11 @@ The list of names of the ASGs that were deployed to act as EKS workers.
diff --git a/docs/reference/services/app-orchestration/amazon-eks.md b/docs/reference/services/app-orchestration/amazon-eks.md
index 112bfa2b36..8504fc2092 100644
--- a/docs/reference/services/app-orchestration/amazon-eks.md
+++ b/docs/reference/services/app-orchestration/amazon-eks.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon EKS
-View Source
+View Source
Release Notes
@@ -68,9 +68,9 @@ more, see the documentation in the [terraform-aws-eks](https://github.com/gruntw
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -78,7 +78,7 @@ more, see the documentation in the [terraform-aws-eks](https://github.com/gruntw
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -86,7 +86,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -116,7 +116,7 @@ To add and manage additional worker groups, refer to the [eks-workers module](/r
module "eks_cluster" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-cluster?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-cluster?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -144,22 +144,24 @@ module "eks_cluster" {
# A list of additional security group IDs to attach to the worker nodes.
additional_security_groups_for_workers = []
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arn = []
# The list of CIDR blocks to allow inbound SSH access to the worker groups.
allow_inbound_ssh_from_cidr_blocks = []
- # The list of security group IDs to allow inbound SSH access to the worker groups.
+ # The list of security group IDs to allow inbound SSH access to the worker
+ # groups.
allow_inbound_ssh_from_security_groups = []
- # The list of CIDR blocks to allow inbound access to the private Kubernetes API
- # endpoint (e.g. the endpoint within the VPC, not the public endpoint).
+ # The list of CIDR blocks to allow inbound access to the private Kubernetes
+ # API endpoint (e.g. the endpoint within the VPC, not the public endpoint).
allow_private_api_access_from_cidr_blocks = []
- # The list of security groups to allow inbound access to the private Kubernetes
- # API endpoint (e.g. the endpoint within the VPC, not the public endpoint).
+ # The list of security groups to allow inbound access to the private
+ # Kubernetes API endpoint (e.g. the endpoint within the VPC, not the public
+ # endpoint).
allow_private_api_access_from_security_groups = []
# Default value for enable_detailed_monitoring field of
@@ -205,12 +207,12 @@ module "eks_cluster" {
# max_pods_allowed will use this value.
asg_default_max_pods_allowed = null
- # Default value for the max_size field of autoscaling_group_configurations. Any
- # map entry that does not specify max_size will use this value.
+ # Default value for the max_size field of autoscaling_group_configurations.
+ # Any map entry that does not specify max_size will use this value.
asg_default_max_size = 2
- # Default value for the min_size field of autoscaling_group_configurations. Any
- # map entry that does not specify min_size will use this value.
+ # Default value for the min_size field of autoscaling_group_configurations.
+ # Any map entry that does not specify min_size will use this value.
asg_default_min_size = 1
# Default value for the multi_instance_overrides field of
@@ -243,13 +245,14 @@ module "eks_cluster" {
# spot_instance_pools will use this value.
asg_default_spot_instance_pools = null
- # Default value for the spot_max_price field of autoscaling_group_configurations.
- # Any map entry that does not specify spot_max_price will use this value. Set to
- # empty string (default) to mean on-demand price.
+ # Default value for the spot_max_price field of
+ # autoscaling_group_configurations. Any map entry that does not specify
+ # spot_max_price will use this value. Set to empty string (default) to mean
+ # on-demand price.
asg_default_spot_max_price = null
- # Default value for the tags field of autoscaling_group_configurations. Any map
- # entry that does not specify tags will use this value.
+ # Default value for the tags field of autoscaling_group_configurations. Any
+ # map entry that does not specify tags will use this value.
asg_default_tags = []
# Default value for the use_multi_instances_policy field of
@@ -270,16 +273,17 @@ module "eks_cluster" {
# worker pool. The key is the tag name and the value is the tag value.
asg_security_group_tags = {}
- # When true, all the relevant resources for self managed workers will be set to
- # use the name_prefix attribute so that unique names are generated for them. This
- # allows those resources to support recreation through create_before_destroy
- # lifecycle rules. Set to false if you were using any version before 0.65.0 and
- # wish to avoid recreating the entire worker pool on your cluster.
+ # When true, all the relevant resources for self managed workers will be set
+ # to use the name_prefix attribute so that unique names are generated for
+ # them. This allows those resources to support recreation through
+ # create_before_destroy lifecycle rules. Set to false if you were using any
+ # version before 0.65.0 and wish to avoid recreating the entire worker pool on
+ # your cluster.
asg_use_resource_name_prefix = true
- # Configure one or more Auto Scaling Groups (ASGs) to manage the EC2 instances in
- # this cluster. If any of the values are not provided, the specified default
- # variable will be used to lookup a default value.
+ # Configure one or more Auto Scaling Groups (ASGs) to manage the EC2 instances
+ # in this cluster. If any of the values are not provided, the specified
+ # default variable will be used to lookup a default value.
autoscaling_group_configurations = {}
# Adds additional tags to each ASG that allow a cluster autoscaler to
@@ -287,34 +291,34 @@ module "eks_cluster" {
autoscaling_group_include_autoscaler_discovery_tags = true
# Name of the default aws-auth ConfigMap to use. This will be the name of the
- # ConfigMap that gets created by this module in the aws-auth-merger namespace to
- # seed the initial aws-auth ConfigMap.
+ # ConfigMap that gets created by this module in the aws-auth-merger namespace
+ # to seed the initial aws-auth ConfigMap.
aws_auth_merger_default_configmap_name = "main-aws-auth"
- # Location of the container image to use for the aws-auth-merger app. You can use
- # the Dockerfile provided in terraform-aws-eks to construct an image. See
- # https://github.com/gruntwork-io/terraform-aws-eks/blob/master/modules/eks-aws-au
- # h-merger/core-concepts.md#how-do-i-use-the-aws-auth-merger for more info.
+ # Location of the container image to use for the aws-auth-merger app. You can
+ # use the Dockerfile provided in terraform-aws-eks to construct an image. See
+ # https://github.com/gruntwork-io/terraform-aws-eks/blob/master/modules/eks-aws-auth-merger/core-concepts.md#how-do-i-use-the-aws-auth-merger
+ # for more info.
aws_auth_merger_image = null
- # Namespace to deploy the aws-auth-merger into. The app will watch for ConfigMaps
- # in this Namespace to merge into the aws-auth ConfigMap.
+ # Namespace to deploy the aws-auth-merger into. The app will watch for
+ # ConfigMaps in this Namespace to merge into the aws-auth ConfigMap.
aws_auth_merger_namespace = "aws-auth-merger"
- # Cloud init scripts to run on the EKS worker nodes when it is booting. See the
- # part blocks in
+ # Cloud init scripts to run on the EKS worker nodes when it is booting. See
+ # the part blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
- # syntax. To override the default boot script installed as part of the module, use
- # the key `default`.
+ # syntax. To override the default boot script installed as part of the module,
+ # use the key `default`.
cloud_init_parts = {}
# ARN of permissions boundary to apply to the cluster IAM role - the IAM role
# created for the EKS cluster.
cluster_iam_role_permissions_boundary = null
- # The AMI to run on each instance in the EKS cluster. You can build the AMI using
- # the Packer template eks-node-al2.json. One of var.cluster_instance_ami or
- # var.cluster_instance_ami_filters is required. Only used if
+ # The AMI to run on each instance in the EKS cluster. You can build the AMI
+ # using the Packer template eks-node-al2.json. One of var.cluster_instance_ami
+ # or var.cluster_instance_ami_filters is required. Only used if
# var.cluster_instance_ami_filters is null. Set to null if
# cluster_instance_ami_filters is set.
cluster_instance_ami = null
@@ -328,7 +332,8 @@ module "eks_cluster" {
cluster_instance_ami_filters = null
# Whether or not to associate a public IP address to the instances of the self
- # managed ASGs. Will only work if the instances are launched in a public subnet.
+ # managed ASGs. Will only work if the instances are launched in a public
+ # subnet.
cluster_instance_associate_public_ip_address = false
# The name of the Key Pair that can be used to SSH to each instance in the EKS
@@ -341,27 +346,26 @@ module "eks_cluster" {
# The number of days to retain log events in the CloudWatch log group for EKS
# control plane logs. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
control_plane_cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group for EKS control plane logs, encoded as
- # a map where the keys are tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group for EKS control plane logs,
+ # encoded as a map where the keys are tag keys and values are tag values.
control_plane_cloudwatch_log_group_tags = null
- # A list of availability zones in the region that we CANNOT use to deploy the EKS
- # control plane. You can use this to avoid availability zones that may not be able
- # to provision the resources (e.g ran out of capacity). If empty, will allow all
- # availability zones.
+ # A list of availability zones in the region that we CANNOT use to deploy the
+ # EKS control plane. You can use this to avoid availability zones that may not
+ # be able to provision the resources (e.g ran out of capacity). If empty, will
+ # allow all availability zones.
control_plane_disallowed_availability_zones = ["us-east-1e"]
# When true, IAM role will be created and attached to Fargate control plane
# services.
create_default_fargate_iam_role = true
- # The name to use for the default Fargate execution IAM role that is created when
- # create_default_fargate_iam_role is true. When null, defaults to
+ # The name to use for the default Fargate execution IAM role that is created
+ # when create_default_fargate_iam_role is true. When null, defaults to
# CLUSTER_NAME-fargate-role.
custom_default_fargate_iam_role_name = null
@@ -377,24 +381,24 @@ module "eks_cluster" {
# dashboard.
dashboard_cpu_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the worker disk usage widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the worker disk usage widget to output for use in a
+ # CloudWatch dashboard.
dashboard_disk_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the worker memory usage widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the worker memory usage widget to output for use in a
+ # CloudWatch dashboard.
dashboard_memory_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Map of EKS add-ons, where key is name of the add-on and value is a map of add-on
- # properties.
+ # Map of EKS add-ons, where key is name of the add-on and value is a map of
+ # add-on properties.
eks_addons = {}
- # A map of custom tags to apply to the Security Group for the EKS Cluster Control
- # Plane. The key is the tag name and the value is the tag value.
+ # A map of custom tags to apply to the Security Group for the EKS Cluster
+ # Control Plane. The key is the tag name and the value is the tag value.
eks_cluster_security_group_tags = {}
- # A map of custom tags to apply to the EKS Cluster Control Plane. The key is the
- # tag name and the value is the tag value.
+ # A map of custom tags to apply to the EKS Cluster Control Plane. The key is
+ # the tag name and the value is the tag value.
eks_cluster_tags = {}
# If set to true, installs the aws-auth-merger to manage the aws-auth
@@ -402,28 +406,28 @@ module "eks_cluster" {
# variable.
enable_aws_auth_merger = false
- # When true, deploy the aws-auth-merger into Fargate. It is recommended to run the
- # aws-auth-merger on Fargate to avoid chicken and egg issues between the
+ # When true, deploy the aws-auth-merger into Fargate. It is recommended to run
+ # the aws-auth-merger on Fargate to avoid chicken and egg issues between the
# aws-auth-merger and having an authenticated worker pool.
enable_aws_auth_merger_fargate = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your
- # Bastion host.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your Bastion host.
enable_cloudwatch_metrics = true
# When set to true, the module configures EKS add-ons
- # (https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html) specified
- # with `eks_addons`. VPC CNI configurations with `use_vpc_cni_customize_script`
- # isn't fully supported with addons, as the automated add-on lifecycles could
- # potentially undo the configuration changes.
+ # (https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html)
+ # specified with `eks_addons`. VPC CNI configurations with
+ # `use_vpc_cni_customize_script` isn't fully supported with addons, as the
+ # automated add-on lifecycles could potentially undo the configuration
+ # changes.
enable_eks_addons = false
# Enable fail2ban to block brute force log in attempts. Defaults to true.
@@ -431,52 +435,51 @@ module "eks_cluster" {
# Set to true to send worker system logs to CloudWatch. This is useful in
# combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch. Note
- # that this is only recommended for aggregating system level logs from the server
- # instances. Container logs should be managed through fluent-bit deployed with
- # eks-core-services.
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch. Note that this is only recommended for
+ # aggregating system level logs from the server instances. Container logs
+ # should be managed through fluent-bit deployed with eks-core-services.
enable_worker_cloudwatch_log_aggregation = false
# A list of the desired control plane logging to enable. See
- # https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html for the
- # list of available logs.
+ # https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html for
+ # the list of available logs.
enabled_control_plane_log_types = ["api","audit","authenticator"]
# Whether or not to enable public API endpoints which allow access to the
- # Kubernetes API from outside of the VPC. Note that private access within the VPC
- # is always enabled.
+ # Kubernetes API from outside of the VPC. Note that private access within the
+ # VPC is always enabled.
endpoint_public_access = true
- # If you are using ssh-grunt and your IAM users / groups are defined in a separate
- # AWS account, you can use this variable to specify the ARN of an IAM role that
- # ssh-grunt can assume to retrieve IAM group and public SSH key info from that
- # account. To omit this variable, set it to an empty string (do NOT use null, or
- # Terraform will complain).
+ # If you are using ssh-grunt and your IAM users / groups are defined in a
+ # separate AWS account, you can use this variable to specify the ARN of an IAM
+ # role that ssh-grunt can assume to retrieve IAM group and public SSH key info
+ # from that account. To omit this variable, set it to an empty string (do NOT
+ # use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # List of ARNs of AWS IAM roles corresponding to Fargate Profiles that should be
- # mapped as Kubernetes Nodes.
+ # List of ARNs of AWS IAM roles corresponding to Fargate Profiles that should
+ # be mapped as Kubernetes Nodes.
fargate_profile_executor_iam_role_arns_for_k8s_role_mapping = []
- # A list of availability zones in the region that we CANNOT use to deploy the EKS
- # Fargate workers. You can use this to avoid availability zones that may not be
- # able to provision the resources (e.g ran out of capacity). If empty, will allow
- # all availability zones.
+ # A list of availability zones in the region that we CANNOT use to deploy the
+ # EKS Fargate workers. You can use this to avoid availability zones that may
+ # not be able to provision the resources (e.g ran out of capacity). If empty,
+ # will allow all availability zones.
fargate_worker_disallowed_availability_zones = ["us-east-1d","us-east-1e","ca-central-1d"]
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_worker_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_worker_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -487,46 +490,48 @@ module "eks_cluster" {
# percentage above this threshold.
high_worker_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_worker_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_worker_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_memory_utilization_treat_missing_data = "missing"
- # Mapping of IAM role ARNs to Kubernetes RBAC groups that grant permissions to the
- # user.
+ # Mapping of IAM role ARNs to Kubernetes RBAC groups that grant permissions to
+ # the user.
iam_role_to_rbac_group_mapping = {}
- # Mapping of IAM user ARNs to Kubernetes RBAC groups that grant permissions to the
- # user.
+ # Mapping of IAM user ARNs to Kubernetes RBAC groups that grant permissions to
+ # the user.
iam_user_to_rbac_group_mapping = {}
- # The URL from which to download Kubergrunt if it's not installed already. Use to
- # specify a version of kubergrunt that is compatible with your specified
+ # The URL from which to download Kubergrunt if it's not installed already. Use
+ # to specify a version of kubergrunt that is compatible with your specified
# kubernetes version. Ex.
- # 'https://github.com/gruntwork-io/kubergrunt/releases/download/v0.11.1/kubergrunt
+ # 'https://github.com/gruntwork-io/kubergrunt/releases/download/v0.11.1/kubergrunt'
kubergrunt_download_url = "https://github.com/gruntwork-io/kubergrunt/releases/download/v0.11.1/kubergrunt"
- # Version of Kubernetes to use. Refer to EKS docs for list of available versions
+ # Version of Kubernetes to use. Refer to EKS docs for list of available
+ # versions
# (https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html).
kubernetes_version = "1.25"
- # Configure one or more Node Groups to manage the EC2 instances in this cluster.
- # Set to empty object ({}) if you do not wish to configure managed node groups.
+ # Configure one or more Node Groups to manage the EC2 instances in this
+ # cluster. Set to empty object ({}) if you do not wish to configure managed
+ # node groups.
managed_node_group_configurations = {}
# Default value for capacity_type field of managed_node_group_configurations.
@@ -560,8 +565,8 @@ module "eks_cluster" {
node_group_default_instance_types = null
# Default value for labels field of managed_node_group_configurations. Unlike
- # common_labels which will always be merged in, these labels are only used if the
- # labels field is omitted from the configuration.
+ # common_labels which will always be merged in, these labels are only used if
+ # the labels field is omitted from the configuration.
node_group_default_labels = {}
# Default value for the max_pods_allowed field of
@@ -579,33 +584,33 @@ module "eks_cluster" {
node_group_default_subnet_ids = null
# Default value for tags field of managed_node_group_configurations. Unlike
- # common_tags which will always be merged in, these tags are only used if the tags
- # field is omitted from the configuration.
+ # common_tags which will always be merged in, these tags are only used if the
+ # tags field is omitted from the configuration.
node_group_default_tags = {}
# Default value for taint field of node_group_configurations. These taints are
# only used if the taint field is omitted from the configuration.
node_group_default_taints = []
- # ARN of a permission boundary to apply on the IAM role created for the managed
- # node groups.
+ # ARN of a permission boundary to apply on the IAM role created for the
+ # managed node groups.
node_group_iam_permissions_boundary = null
- # The instance type to configure in the launch template. This value will be used
- # when the instance_types field is set to null (NOT omitted, in which case
- # var.node_group_default_instance_types will be used).
+ # The instance type to configure in the launch template. This value will be
+ # used when the instance_types field is set to null (NOT omitted, in which
+ # case var.node_group_default_instance_types will be used).
node_group_launch_template_instance_type = null
- # Tags assigned to a node group are mirrored to the underlaying autoscaling group
- # by default. If you want to disable this behaviour, set this flag to false. Note
- # that this assumes that there is a one-to-one mappping between ASGs and Node
- # Groups. If there is more than one ASG mapped to the Node Group, then this will
- # only apply the tags on the first one. Due to a limitation in Terraform for_each
- # where it can not depend on dynamic data, it is currently not possible in the
- # module to map the tags to all ASGs. If you wish to apply the tags to all
- # underlying ASGs, then it is recommended to call the aws_autoscaling_group_tag
- # resource in a separate terraform state file outside of this module, or use a
- # two-stage apply process.
+ # Tags assigned to a node group are mirrored to the underlying autoscaling
+ # group by default. If you want to disable this behaviour, set this flag to
+ # false. Note that this assumes that there is a one-to-one mapping between
+ # ASGs and Node Groups. If there is more than one ASG mapped to the Node
+ # Group, then this will only apply the tags on the first one. Due to a
+ # limitation in Terraform for_each where it can not depend on dynamic data, it
+ # is currently not possible in the module to map the tags to all ASGs. If you
+ # wish to apply the tags to all underlying ASGs, then it is recommended to
+ # call the aws_autoscaling_group_tag resource in a separate terraform state
+ # file outside of this module, or use a two-stage apply process.
node_group_mirror_tags_to_asg = true
# A map of tags to apply to the Security Group of the ASG for the managed node
@@ -613,17 +618,17 @@ module "eks_cluster" {
node_group_security_group_tags = {}
# Number of subnets provided in the var.control_plane_vpc_subnet_ids variable.
- # When null (default), this is computed dynamically from the list. This is used to
- # workaround terraform limitations where resource count and for_each can not
- # depend on dynamic resources (e.g., if you are creating the subnets and the EKS
- # cluster in the same module).
+ # When null (default), this is computed dynamically from the list. This is
+ # used to workaround terraform limitations where resource count and for_each
+ # can not depend on dynamic resources (e.g., if you are creating the subnets
+ # and the EKS cluster in the same module).
num_control_plane_vpc_subnet_ids = null
- # Number of subnets provided in the var.worker_vpc_subnet_ids variable. When null
- # (default), this is computed dynamically from the list. This is used to
+ # Number of subnets provided in the var.worker_vpc_subnet_ids variable. When
+ # null (default), this is computed dynamically from the list. This is used to
# workaround terraform limitations where resource count and for_each can not
- # depend on dynamic resources (e.g., if you are creating the subnets and the EKS
- # cluster in the same module).
+ # depend on dynamic resources (e.g., if you are creating the subnets and the
+ # EKS cluster in the same module).
num_worker_vpc_subnet_ids = null
# When true, configures control plane services to run on Fargate so that the
@@ -631,103 +636,108 @@ module "eks_cluster" {
# available on the system, and create_default_fargate_iam_role be set to true.
schedule_control_plane_services_on_fargate = false
- # ARN for KMS Key to use for envelope encryption of Kubernetes Secrets. By default
- # Secrets in EKS are encrypted at rest at the EBS layer in the managed etcd
- # cluster using shared AWS managed keys. Setting this variable will configure
- # Kubernetes to use envelope encryption to encrypt Secrets using this KMS key on
- # top of the EBS layer encryption.
+ # ARN for KMS Key to use for envelope encryption of Kubernetes Secrets. By
+ # default Secrets in EKS are encrypted at rest at the EBS layer in the managed
+ # etcd cluster using shared AWS managed keys. Setting this variable will
+ # configure Kubernetes to use envelope encryption to encrypt Secrets using
+ # this KMS key on top of the EBS layer encryption.
secret_envelope_encryption_kms_key_arn = null
# When true, precreate the CloudWatch Log Group to use for EKS control plane
- # logging. This is useful if you wish to customize the CloudWatch Log Group with
- # various settings such as retention periods and KMS encryption. When false, EKS
- # will automatically create a basic log group to use. Note that logs are only
- # streamed to this group if var.enabled_cluster_log_types is true.
+ # logging. This is useful if you wish to customize the CloudWatch Log Group
+ # with various settings such as retention periods and KMS encryption. When
+ # false, EKS will automatically create a basic log group to use. Note that
+ # logs are only streamed to this group if var.enabled_cluster_log_types is
+ # true.
should_create_control_plane_cloudwatch_log_group = true
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the EKS workers. To omit this variable, set it to an
- # empty string (do NOT use null, or Terraform will complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the EKS workers. To omit this variable, set
+ # it to an empty string (do NOT use null, or Terraform will complain).
ssh_grunt_iam_group = "ssh-grunt-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the EKS workers with sudo permissions. To omit this
- # variable, set it to an empty string (do NOT use null, or Terraform will
- # complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the EKS workers with sudo permissions. To
+ # omit this variable, set it to an empty string (do NOT use null, or Terraform
+ # will complain).
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
# The tenancy of this server. Must be one of: default, dedicated, or host.
tenancy = "default"
- # If this variable is set to true, then use an exec-based plugin to authenticate
- # and fetch tokens for EKS. This is useful because EKS clusters use short-lived
- # authentication tokens that can expire in the middle of an 'apply' or 'destroy',
- # and since the native Kubernetes provider in Terraform doesn't have a way to
- # fetch up-to-date tokens, we recommend using an exec-based provider as a
- # workaround. Use the use_kubergrunt_to_fetch_token input variable to control
- # whether kubergrunt or aws is used to fetch tokens.
+ # If this variable is set to true, then use an exec-based plugin to
+ # authenticate and fetch tokens for EKS. This is useful because EKS clusters
+ # use short-lived authentication tokens that can expire in the middle of an
+ # 'apply' or 'destroy', and since the native Kubernetes provider in Terraform
+ # doesn't have a way to fetch up-to-date tokens, we recommend using an
+ # exec-based provider as a workaround. Use the use_kubergrunt_to_fetch_token
+ # input variable to control whether kubergrunt or aws is used to fetch tokens.
use_exec_plugin_for_auth = true
- # Set this variable to true to enable the use of Instance Metadata Service Version
- # 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due
- # to its special security hardening, we allow this in order to support the use
- # case of AMIs built outside of these modules that depend on IMDSv1.
+ # Set this variable to true to enable the use of Instance Metadata Service
+ # Version 1 in this module's aws_launch_template. Note that while IMDSv2 is
+ # preferred due to its special security hardening, we allow this in order to
+ # support the use case of AMIs built outside of these modules that depend on
+ # IMDSv1.
use_imdsv1 = false
- # When set to true, this will enable kubergrunt based component syncing. This step
- # ensures that the core EKS components that are installed are upgraded to a
- # matching version everytime the cluster's Kubernetes version is updated.
+ # When set to true, this will enable kubergrunt based component syncing. This
+ # step ensures that the core EKS components that are installed are upgraded to
+ # a matching version every time the cluster's Kubernetes version is updated.
use_kubergrunt_sync_components = true
- # EKS clusters use short-lived authentication tokens that can expire in the middle
- # of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based plugin to
- # fetch an up-to-date token. If this variable is set to true, we'll use kubergrunt
- # to fetch the token (in which case, kubergrunt must be installed and on PATH); if
- # this variable is set to false, we'll use the aws CLI to fetch the token (in
- # which case, aws must be installed and on PATH). Note this functionality is only
- # enabled if use_exec_plugin_for_auth is set to true.
+ # EKS clusters use short-lived authentication tokens that can expire in the
+ # middle of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based
+ # plugin to fetch an up-to-date token. If this variable is set to true, we'll
+ # use kubergrunt to fetch the token (in which case, kubergrunt must be
+ # installed and on PATH); if this variable is set to false, we'll use the aws
+ # CLI to fetch the token (in which case, aws must be installed and on PATH).
+ # Note this functionality is only enabled if use_exec_plugin_for_auth is set
+ # to true.
use_kubergrunt_to_fetch_token = true
# When set to true, this will enable kubergrunt verification to wait for the
- # Kubernetes API server to come up before completing. If false, reverts to a 30
- # second timed wait instead.
+ # Kubernetes API server to come up before completing. If false, reverts to a
+ # 30 second timed wait instead.
use_kubergrunt_verification = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
- # When set to true, this will enable management of the aws-vpc-cni configuration
- # options using kubergrunt running as a local-exec provisioner. If you set this to
- # false, the vpc_cni_* variables will be ignored.
+ # When set to true, this will enable management of the aws-vpc-cni
+ # configuration options using kubergrunt running as a local-exec provisioner.
+ # If you set this to false, the vpc_cni_* variables will be ignored.
use_vpc_cni_customize_script = true
- # When true, enable prefix delegation mode for the AWS VPC CNI component of the
- # EKS cluster. In prefix delegation mode, each ENI will be allocated 16 IP
- # addresses (/28) instead of 1, allowing you to pack more Pods per node. Note that
- # by default, AWS VPC CNI will always preallocate 1 full prefix - this means that
- # you can potentially take up 32 IP addresses from the VPC network space even if
- # you only have 1 Pod on the node. You can tweak this behavior by configuring the
- # var.vpc_cni_warm_ip_target input variable.
+ # When true, enable prefix delegation mode for the AWS VPC CNI component of
+ # the EKS cluster. In prefix delegation mode, each ENI will be allocated 16 IP
+ # addresses (/28) instead of 1, allowing you to pack more Pods per node. Note
+ # that by default, AWS VPC CNI will always preallocate 1 full prefix - this
+ # means that you can potentially take up 32 IP addresses from the VPC network
+ # space even if you only have 1 Pod on the node. You can tweak this behavior
+ # by configuring the var.vpc_cni_warm_ip_target input variable.
vpc_cni_enable_prefix_delegation = true
- # The minimum number of IP addresses (free and used) each node should start with.
- # When null, defaults to the aws-vpc-cni application setting (currently 16 as of
- # version 1.9.0). For example, if this is set to 25, every node will allocate 2
- # prefixes (32 IP addresses). On the other hand, if this was set to the default
- # value, then each node will allocate only 1 prefix (16 IP addresses).
+ # The minimum number of IP addresses (free and used) each node should start
+ # with. When null, defaults to the aws-vpc-cni application setting (currently
+ # 16 as of version 1.9.0). For example, if this is set to 25, every node will
+ # allocate 2 prefixes (32 IP addresses). On the other hand, if this was set to
+ # the default value, then each node will allocate only 1 prefix (16 IP
+ # addresses).
vpc_cni_minimum_ip_target = null
- # The number of free IP addresses each node should maintain. When null, defaults
- # to the aws-vpc-cni application setting (currently 16 as of version 1.9.0). In
- # prefix delegation mode, determines whether the node will preallocate another
- # full prefix. For example, if this is set to 5 and a node is currently has 9 Pods
- # scheduled, then the node will NOT preallocate a new prefix block of 16 IP
- # addresses. On the other hand, if this was set to the default value, then the
- # node will allocate a new block when the first pod is scheduled.
+ # The number of free IP addresses each node should maintain. When null,
+ # defaults to the aws-vpc-cni application setting (currently 16 as of version
+ # 1.9.0). In prefix delegation mode, determines whether the node will
+ # preallocate another full prefix. For example, if this is set to 5 and a node
+ # currently has 9 Pods scheduled, then the node will NOT preallocate a new
+ # prefix block of 16 IP addresses. On the other hand, if this was set to the
+ # default value, then the node will allocate a new block when the first pod is
+ # scheduled.
vpc_cni_warm_ip_target = null
# The ID (ARN, alias ARN, AWS ID) of a customer managed KMS Key to use for
@@ -735,20 +745,19 @@ module "eks_cluster" {
# var.enable_worker_cloudwatch_log_aggregation is true.
worker_cloudwatch_log_group_kms_key_id = null
- # Name of the CloudWatch Log Group where worker system logs are reported to. Only
- # used if var.enable_worker_cloudwatch_log_aggregation is true.
+ # Name of the CloudWatch Log Group where worker system logs are reported to.
+ # Only used if var.enable_worker_cloudwatch_log_aggregation is true.
worker_cloudwatch_log_group_name = null
# The number of days to retain log events in the worker system logs log group.
# Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever. Only used if
- # var.enable_worker_cloudwatch_log_aggregation is true.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
+ # Only used if var.enable_worker_cloudwatch_log_aggregation is true.
worker_cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the worker system logs CloudWatch Log Group, encoded as a map
- # where the keys are tag keys and values are tag values. Only used if
+ # Tags to apply on the worker system logs CloudWatch Log Group, encoded as a
+ # map where the keys are tag keys and values are tag values. Only used if
# var.enable_worker_cloudwatch_log_aggregation is true.
worker_cloudwatch_log_group_tags = null
@@ -756,14 +765,15 @@ module "eks_cluster" {
# mapped as Kubernetes Nodes.
worker_iam_role_arns_for_k8s_role_mapping = []
- # Prefix EKS worker resource names with this string. When you have multiple worker
- # groups for the cluster, you can use this to namespace the resources. Defaults to
- # empty string so that resource names are not excessively long by default.
+ # Prefix EKS worker resource names with this string. When you have multiple
+ # worker groups for the cluster, you can use this to namespace the resources.
+ # Defaults to empty string so that resource names are not excessively long by
+ # default.
worker_name_prefix = ""
- # A list of the subnets into which the EKS Cluster's administrative pods will be
- # launched. These should usually be all private subnets and include one in each
- # AWS Availability Zone. Required when
+ # A list of the subnets into which the EKS Cluster's administrative pods will
+ # be launched. These should usually be all private subnets and include one in
+ # each AWS Availability Zone. Required when
# var.schedule_control_plane_services_on_fargate is true.
worker_vpc_subnet_ids = []
@@ -782,7 +792,7 @@ module "eks_cluster" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-cluster?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-cluster?ref=v0.104.12"
}
inputs = {
@@ -813,22 +823,24 @@ inputs = {
# A list of additional security group IDs to attach to the worker nodes.
additional_security_groups_for_workers = []
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arn = []
# The list of CIDR blocks to allow inbound SSH access to the worker groups.
allow_inbound_ssh_from_cidr_blocks = []
- # The list of security group IDs to allow inbound SSH access to the worker groups.
+ # The list of security group IDs to allow inbound SSH access to the worker
+ # groups.
allow_inbound_ssh_from_security_groups = []
- # The list of CIDR blocks to allow inbound access to the private Kubernetes API
- # endpoint (e.g. the endpoint within the VPC, not the public endpoint).
+ # The list of CIDR blocks to allow inbound access to the private Kubernetes
+ # API endpoint (e.g. the endpoint within the VPC, not the public endpoint).
allow_private_api_access_from_cidr_blocks = []
- # The list of security groups to allow inbound access to the private Kubernetes
- # API endpoint (e.g. the endpoint within the VPC, not the public endpoint).
+ # The list of security groups to allow inbound access to the private
+ # Kubernetes API endpoint (e.g. the endpoint within the VPC, not the public
+ # endpoint).
allow_private_api_access_from_security_groups = []
# Default value for enable_detailed_monitoring field of
@@ -874,12 +886,12 @@ inputs = {
# max_pods_allowed will use this value.
asg_default_max_pods_allowed = null
- # Default value for the max_size field of autoscaling_group_configurations. Any
- # map entry that does not specify max_size will use this value.
+ # Default value for the max_size field of autoscaling_group_configurations.
+ # Any map entry that does not specify max_size will use this value.
asg_default_max_size = 2
- # Default value for the min_size field of autoscaling_group_configurations. Any
- # map entry that does not specify min_size will use this value.
+ # Default value for the min_size field of autoscaling_group_configurations.
+ # Any map entry that does not specify min_size will use this value.
asg_default_min_size = 1
# Default value for the multi_instance_overrides field of
@@ -912,13 +924,14 @@ inputs = {
# spot_instance_pools will use this value.
asg_default_spot_instance_pools = null
- # Default value for the spot_max_price field of autoscaling_group_configurations.
- # Any map entry that does not specify spot_max_price will use this value. Set to
- # empty string (default) to mean on-demand price.
+ # Default value for the spot_max_price field of
+ # autoscaling_group_configurations. Any map entry that does not specify
+ # spot_max_price will use this value. Set to empty string (default) to mean
+ # on-demand price.
asg_default_spot_max_price = null
- # Default value for the tags field of autoscaling_group_configurations. Any map
- # entry that does not specify tags will use this value.
+ # Default value for the tags field of autoscaling_group_configurations. Any
+ # map entry that does not specify tags will use this value.
asg_default_tags = []
# Default value for the use_multi_instances_policy field of
@@ -939,16 +952,17 @@ inputs = {
# worker pool. The key is the tag name and the value is the tag value.
asg_security_group_tags = {}
- # When true, all the relevant resources for self managed workers will be set to
- # use the name_prefix attribute so that unique names are generated for them. This
- # allows those resources to support recreation through create_before_destroy
- # lifecycle rules. Set to false if you were using any version before 0.65.0 and
- # wish to avoid recreating the entire worker pool on your cluster.
+ # When true, all the relevant resources for self managed workers will be set
+ # to use the name_prefix attribute so that unique names are generated for
+ # them. This allows those resources to support recreation through
+ # create_before_destroy lifecycle rules. Set to false if you were using any
+ # version before 0.65.0 and wish to avoid recreating the entire worker pool on
+ # your cluster.
asg_use_resource_name_prefix = true
- # Configure one or more Auto Scaling Groups (ASGs) to manage the EC2 instances in
- # this cluster. If any of the values are not provided, the specified default
- # variable will be used to lookup a default value.
+ # Configure one or more Auto Scaling Groups (ASGs) to manage the EC2 instances
+ # in this cluster. If any of the values are not provided, the specified
+ # default variable will be used to lookup a default value.
autoscaling_group_configurations = {}
# Adds additional tags to each ASG that allow a cluster autoscaler to
@@ -956,34 +970,34 @@ inputs = {
autoscaling_group_include_autoscaler_discovery_tags = true
# Name of the default aws-auth ConfigMap to use. This will be the name of the
- # ConfigMap that gets created by this module in the aws-auth-merger namespace to
- # seed the initial aws-auth ConfigMap.
+ # ConfigMap that gets created by this module in the aws-auth-merger namespace
+ # to seed the initial aws-auth ConfigMap.
aws_auth_merger_default_configmap_name = "main-aws-auth"
- # Location of the container image to use for the aws-auth-merger app. You can use
- # the Dockerfile provided in terraform-aws-eks to construct an image. See
- # https://github.com/gruntwork-io/terraform-aws-eks/blob/master/modules/eks-aws-au
- # h-merger/core-concepts.md#how-do-i-use-the-aws-auth-merger for more info.
+ # Location of the container image to use for the aws-auth-merger app. You can
+ # use the Dockerfile provided in terraform-aws-eks to construct an image. See
+ # https://github.com/gruntwork-io/terraform-aws-eks/blob/master/modules/eks-aws-auth-merger/core-concepts.md#how-do-i-use-the-aws-auth-merger
+ # for more info.
aws_auth_merger_image = null
- # Namespace to deploy the aws-auth-merger into. The app will watch for ConfigMaps
- # in this Namespace to merge into the aws-auth ConfigMap.
+ # Namespace to deploy the aws-auth-merger into. The app will watch for
+ # ConfigMaps in this Namespace to merge into the aws-auth ConfigMap.
aws_auth_merger_namespace = "aws-auth-merger"
- # Cloud init scripts to run on the EKS worker nodes when it is booting. See the
- # part blocks in
+ # Cloud init scripts to run on the EKS worker nodes when it is booting. See
+ # the part blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
- # syntax. To override the default boot script installed as part of the module, use
- # the key `default`.
+ # syntax. To override the default boot script installed as part of the module,
+ # use the key `default`.
cloud_init_parts = {}
# ARN of permissions boundary to apply to the cluster IAM role - the IAM role
# created for the EKS cluster.
cluster_iam_role_permissions_boundary = null
- # The AMI to run on each instance in the EKS cluster. You can build the AMI using
- # the Packer template eks-node-al2.json. One of var.cluster_instance_ami or
- # var.cluster_instance_ami_filters is required. Only used if
+ # The AMI to run on each instance in the EKS cluster. You can build the AMI
+ # using the Packer template eks-node-al2.json. One of var.cluster_instance_ami
+ # or var.cluster_instance_ami_filters is required. Only used if
# var.cluster_instance_ami_filters is null. Set to null if
# cluster_instance_ami_filters is set.
cluster_instance_ami = null
@@ -997,7 +1011,8 @@ inputs = {
cluster_instance_ami_filters = null
# Whether or not to associate a public IP address to the instances of the self
- # managed ASGs. Will only work if the instances are launched in a public subnet.
+ # managed ASGs. Will only work if the instances are launched in a public
+ # subnet.
cluster_instance_associate_public_ip_address = false
# The name of the Key Pair that can be used to SSH to each instance in the EKS
@@ -1010,27 +1025,26 @@ inputs = {
# The number of days to retain log events in the CloudWatch log group for EKS
# control plane logs. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
control_plane_cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group for EKS control plane logs, encoded as
- # a map where the keys are tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group for EKS control plane logs,
+ # encoded as a map where the keys are tag keys and values are tag values.
control_plane_cloudwatch_log_group_tags = null
- # A list of availability zones in the region that we CANNOT use to deploy the EKS
- # control plane. You can use this to avoid availability zones that may not be able
- # to provision the resources (e.g ran out of capacity). If empty, will allow all
- # availability zones.
+ # A list of availability zones in the region that we CANNOT use to deploy the
+ # EKS control plane. You can use this to avoid availability zones that may not
+ # be able to provision the resources (e.g ran out of capacity). If empty, will
+ # allow all availability zones.
control_plane_disallowed_availability_zones = ["us-east-1e"]
# When true, IAM role will be created and attached to Fargate control plane
# services.
create_default_fargate_iam_role = true
- # The name to use for the default Fargate execution IAM role that is created when
- # create_default_fargate_iam_role is true. When null, defaults to
+ # The name to use for the default Fargate execution IAM role that is created
+ # when create_default_fargate_iam_role is true. When null, defaults to
# CLUSTER_NAME-fargate-role.
custom_default_fargate_iam_role_name = null
@@ -1046,24 +1060,24 @@ inputs = {
# dashboard.
dashboard_cpu_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the worker disk usage widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the worker disk usage widget to output for use in a
+ # CloudWatch dashboard.
dashboard_disk_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the worker memory usage widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the worker memory usage widget to output for use in a
+ # CloudWatch dashboard.
dashboard_memory_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Map of EKS add-ons, where key is name of the add-on and value is a map of add-on
- # properties.
+ # Map of EKS add-ons, where key is name of the add-on and value is a map of
+ # add-on properties.
eks_addons = {}
- # A map of custom tags to apply to the Security Group for the EKS Cluster Control
- # Plane. The key is the tag name and the value is the tag value.
+ # A map of custom tags to apply to the Security Group for the EKS Cluster
+ # Control Plane. The key is the tag name and the value is the tag value.
eks_cluster_security_group_tags = {}
- # A map of custom tags to apply to the EKS Cluster Control Plane. The key is the
- # tag name and the value is the tag value.
+ # A map of custom tags to apply to the EKS Cluster Control Plane. The key is
+ # the tag name and the value is the tag value.
eks_cluster_tags = {}
# If set to true, installs the aws-auth-merger to manage the aws-auth
@@ -1071,28 +1085,28 @@ inputs = {
# variable.
enable_aws_auth_merger = false
- # When true, deploy the aws-auth-merger into Fargate. It is recommended to run the
- # aws-auth-merger on Fargate to avoid chicken and egg issues between the
+ # When true, deploy the aws-auth-merger into Fargate. It is recommended to run
+ # the aws-auth-merger on Fargate to avoid chicken and egg issues between the
# aws-auth-merger and having an authenticated worker pool.
enable_aws_auth_merger_fargate = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your
- # Bastion host.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your Bastion host.
enable_cloudwatch_metrics = true
# When set to true, the module configures EKS add-ons
- # (https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html) specified
- # with `eks_addons`. VPC CNI configurations with `use_vpc_cni_customize_script`
- # isn't fully supported with addons, as the automated add-on lifecycles could
- # potentially undo the configuration changes.
+ # (https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html)
+ # specified with `eks_addons`. VPC CNI configurations with
+ # `use_vpc_cni_customize_script` isn't fully supported with addons, as the
+ # automated add-on lifecycles could potentially undo the configuration
+ # changes.
enable_eks_addons = false
# Enable fail2ban to block brute force log in attempts. Defaults to true.
@@ -1100,52 +1114,51 @@ inputs = {
# Set to true to send worker system logs to CloudWatch. This is useful in
# combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch. Note
- # that this is only recommended for aggregating system level logs from the server
- # instances. Container logs should be managed through fluent-bit deployed with
- # eks-core-services.
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch. Note that this is only recommended for
+ # aggregating system level logs from the server instances. Container logs
+ # should be managed through fluent-bit deployed with eks-core-services.
enable_worker_cloudwatch_log_aggregation = false
# A list of the desired control plane logging to enable. See
- # https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html for the
- # list of available logs.
+ # https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html for
+ # the list of available logs.
enabled_control_plane_log_types = ["api","audit","authenticator"]
# Whether or not to enable public API endpoints which allow access to the
- # Kubernetes API from outside of the VPC. Note that private access within the VPC
- # is always enabled.
+ # Kubernetes API from outside of the VPC. Note that private access within the
+ # VPC is always enabled.
endpoint_public_access = true
- # If you are using ssh-grunt and your IAM users / groups are defined in a separate
- # AWS account, you can use this variable to specify the ARN of an IAM role that
- # ssh-grunt can assume to retrieve IAM group and public SSH key info from that
- # account. To omit this variable, set it to an empty string (do NOT use null, or
- # Terraform will complain).
+ # If you are using ssh-grunt and your IAM users / groups are defined in a
+ # separate AWS account, you can use this variable to specify the ARN of an IAM
+ # role that ssh-grunt can assume to retrieve IAM group and public SSH key info
+ # from that account. To omit this variable, set it to an empty string (do NOT
+ # use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # List of ARNs of AWS IAM roles corresponding to Fargate Profiles that should be
- # mapped as Kubernetes Nodes.
+ # List of ARNs of AWS IAM roles corresponding to Fargate Profiles that should
+ # be mapped as Kubernetes Nodes.
fargate_profile_executor_iam_role_arns_for_k8s_role_mapping = []
- # A list of availability zones in the region that we CANNOT use to deploy the EKS
- # Fargate workers. You can use this to avoid availability zones that may not be
- # able to provision the resources (e.g ran out of capacity). If empty, will allow
- # all availability zones.
+ # A list of availability zones in the region that we CANNOT use to deploy the
+ # EKS Fargate workers. You can use this to avoid availability zones that may
+ # not be able to provision the resources (e.g. ran out of capacity). If empty,
+ # will allow all availability zones.
fargate_worker_disallowed_availability_zones = ["us-east-1d","us-east-1e","ca-central-1d"]
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_worker_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_worker_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -1156,46 +1169,48 @@ inputs = {
# percentage above this threshold.
high_worker_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_worker_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_worker_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_worker_memory_utilization_treat_missing_data = "missing"
- # Mapping of IAM role ARNs to Kubernetes RBAC groups that grant permissions to the
- # user.
+ # Mapping of IAM role ARNs to Kubernetes RBAC groups that grant permissions to
+ # the user.
iam_role_to_rbac_group_mapping = {}
- # Mapping of IAM user ARNs to Kubernetes RBAC groups that grant permissions to the
- # user.
+ # Mapping of IAM user ARNs to Kubernetes RBAC groups that grant permissions to
+ # the user.
iam_user_to_rbac_group_mapping = {}
- # The URL from which to download Kubergrunt if it's not installed already. Use to
- # specify a version of kubergrunt that is compatible with your specified
+ # The URL from which to download Kubergrunt if it's not installed already. Use
+ # to specify a version of kubergrunt that is compatible with your specified
# kubernetes version. Ex.
- # 'https://github.com/gruntwork-io/kubergrunt/releases/download/v0.11.1/kubergrunt
+ # 'https://github.com/gruntwork-io/kubergrunt/releases/download/v0.11.1/kubergrunt'
kubergrunt_download_url = "https://github.com/gruntwork-io/kubergrunt/releases/download/v0.11.1/kubergrunt"
- # Version of Kubernetes to use. Refer to EKS docs for list of available versions
+ # Version of Kubernetes to use. Refer to EKS docs for list of available
+ # versions
# (https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html).
kubernetes_version = "1.25"
- # Configure one or more Node Groups to manage the EC2 instances in this cluster.
- # Set to empty object ({}) if you do not wish to configure managed node groups.
+ # Configure one or more Node Groups to manage the EC2 instances in this
+ # cluster. Set to empty object ({}) if you do not wish to configure managed
+ # node groups.
managed_node_group_configurations = {}
# Default value for capacity_type field of managed_node_group_configurations.
@@ -1229,8 +1244,8 @@ inputs = {
node_group_default_instance_types = null
# Default value for labels field of managed_node_group_configurations. Unlike
- # common_labels which will always be merged in, these labels are only used if the
- # labels field is omitted from the configuration.
+ # common_labels which will always be merged in, these labels are only used if
+ # the labels field is omitted from the configuration.
node_group_default_labels = {}
# Default value for the max_pods_allowed field of
@@ -1248,33 +1263,33 @@ inputs = {
node_group_default_subnet_ids = null
# Default value for tags field of managed_node_group_configurations. Unlike
- # common_tags which will always be merged in, these tags are only used if the tags
- # field is omitted from the configuration.
+ # common_tags which will always be merged in, these tags are only used if the
+ # tags field is omitted from the configuration.
node_group_default_tags = {}
# Default value for taint field of node_group_configurations. These taints are
# only used if the taint field is omitted from the configuration.
node_group_default_taints = []
- # ARN of a permission boundary to apply on the IAM role created for the managed
- # node groups.
+ # ARN of a permission boundary to apply on the IAM role created for the
+ # managed node groups.
node_group_iam_permissions_boundary = null
- # The instance type to configure in the launch template. This value will be used
- # when the instance_types field is set to null (NOT omitted, in which case
- # var.node_group_default_instance_types will be used).
+ # The instance type to configure in the launch template. This value will be
+ # used when the instance_types field is set to null (NOT omitted, in which
+ # case var.node_group_default_instance_types will be used).
node_group_launch_template_instance_type = null
- # Tags assigned to a node group are mirrored to the underlaying autoscaling group
- # by default. If you want to disable this behaviour, set this flag to false. Note
- # that this assumes that there is a one-to-one mappping between ASGs and Node
- # Groups. If there is more than one ASG mapped to the Node Group, then this will
- # only apply the tags on the first one. Due to a limitation in Terraform for_each
- # where it can not depend on dynamic data, it is currently not possible in the
- # module to map the tags to all ASGs. If you wish to apply the tags to all
- # underlying ASGs, then it is recommended to call the aws_autoscaling_group_tag
- # resource in a separate terraform state file outside of this module, or use a
- # two-stage apply process.
+ # Tags assigned to a node group are mirrored to the underlying autoscaling
+ # group by default. If you want to disable this behaviour, set this flag to
+ # false. Note that this assumes that there is a one-to-one mapping between
+ # ASGs and Node Groups. If there is more than one ASG mapped to the Node
+ # Group, then this will only apply the tags on the first one. Due to a
+ # limitation in Terraform for_each where it can not depend on dynamic data, it
+ # is currently not possible in the module to map the tags to all ASGs. If you
+ # wish to apply the tags to all underlying ASGs, then it is recommended to
+ # call the aws_autoscaling_group_tag resource in a separate terraform state
+ # file outside of this module, or use a two-stage apply process.
node_group_mirror_tags_to_asg = true
# A map of tags to apply to the Security Group of the ASG for the managed node
@@ -1282,17 +1297,17 @@ inputs = {
node_group_security_group_tags = {}
# Number of subnets provided in the var.control_plane_vpc_subnet_ids variable.
- # When null (default), this is computed dynamically from the list. This is used to
- # workaround terraform limitations where resource count and for_each can not
- # depend on dynamic resources (e.g., if you are creating the subnets and the EKS
- # cluster in the same module).
+ # When null (default), this is computed dynamically from the list. This is
+ # used to work around terraform limitations where resource count and for_each
+ # can not depend on dynamic resources (e.g., if you are creating the subnets
+ # and the EKS cluster in the same module).
num_control_plane_vpc_subnet_ids = null
- # Number of subnets provided in the var.worker_vpc_subnet_ids variable. When null
- # (default), this is computed dynamically from the list. This is used to
+ # Number of subnets provided in the var.worker_vpc_subnet_ids variable. When
+ # null (default), this is computed dynamically from the list. This is used to
# workaround terraform limitations where resource count and for_each can not
- # depend on dynamic resources (e.g., if you are creating the subnets and the EKS
- # cluster in the same module).
+ # depend on dynamic resources (e.g., if you are creating the subnets and the
+ # EKS cluster in the same module).
num_worker_vpc_subnet_ids = null
# When true, configures control plane services to run on Fargate so that the
@@ -1300,103 +1315,108 @@ inputs = {
# available on the system, and create_default_fargate_iam_role be set to true.
schedule_control_plane_services_on_fargate = false
- # ARN for KMS Key to use for envelope encryption of Kubernetes Secrets. By default
- # Secrets in EKS are encrypted at rest at the EBS layer in the managed etcd
- # cluster using shared AWS managed keys. Setting this variable will configure
- # Kubernetes to use envelope encryption to encrypt Secrets using this KMS key on
- # top of the EBS layer encryption.
+ # ARN for KMS Key to use for envelope encryption of Kubernetes Secrets. By
+ # default Secrets in EKS are encrypted at rest at the EBS layer in the managed
+ # etcd cluster using shared AWS managed keys. Setting this variable will
+ # configure Kubernetes to use envelope encryption to encrypt Secrets using
+ # this KMS key on top of the EBS layer encryption.
secret_envelope_encryption_kms_key_arn = null
# When true, precreate the CloudWatch Log Group to use for EKS control plane
- # logging. This is useful if you wish to customize the CloudWatch Log Group with
- # various settings such as retention periods and KMS encryption. When false, EKS
- # will automatically create a basic log group to use. Note that logs are only
- # streamed to this group if var.enabled_cluster_log_types is true.
+ # logging. This is useful if you wish to customize the CloudWatch Log Group
+ # with various settings such as retention periods and KMS encryption. When
+ # false, EKS will automatically create a basic log group to use. Note that
+ # logs are only streamed to this group if var.enabled_cluster_log_types is
+ # true.
should_create_control_plane_cloudwatch_log_group = true
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the EKS workers. To omit this variable, set it to an
- # empty string (do NOT use null, or Terraform will complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the EKS workers. To omit this variable, set
+ # it to an empty string (do NOT use null, or Terraform will complain).
ssh_grunt_iam_group = "ssh-grunt-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the EKS workers with sudo permissions. To omit this
- # variable, set it to an empty string (do NOT use null, or Terraform will
- # complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the EKS workers with sudo permissions. To
+ # omit this variable, set it to an empty string (do NOT use null, or Terraform
+ # will complain).
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
# The tenancy of this server. Must be one of: default, dedicated, or host.
tenancy = "default"
- # If this variable is set to true, then use an exec-based plugin to authenticate
- # and fetch tokens for EKS. This is useful because EKS clusters use short-lived
- # authentication tokens that can expire in the middle of an 'apply' or 'destroy',
- # and since the native Kubernetes provider in Terraform doesn't have a way to
- # fetch up-to-date tokens, we recommend using an exec-based provider as a
- # workaround. Use the use_kubergrunt_to_fetch_token input variable to control
- # whether kubergrunt or aws is used to fetch tokens.
+ # If this variable is set to true, then use an exec-based plugin to
+ # authenticate and fetch tokens for EKS. This is useful because EKS clusters
+ # use short-lived authentication tokens that can expire in the middle of an
+ # 'apply' or 'destroy', and since the native Kubernetes provider in Terraform
+ # doesn't have a way to fetch up-to-date tokens, we recommend using an
+ # exec-based provider as a workaround. Use the use_kubergrunt_to_fetch_token
+ # input variable to control whether kubergrunt or aws is used to fetch tokens.
use_exec_plugin_for_auth = true
- # Set this variable to true to enable the use of Instance Metadata Service Version
- # 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due
- # to its special security hardening, we allow this in order to support the use
- # case of AMIs built outside of these modules that depend on IMDSv1.
+ # Set this variable to true to enable the use of Instance Metadata Service
+ # Version 1 in this module's aws_launch_template. Note that while IMDSv2 is
+ # preferred due to its special security hardening, we allow this in order to
+ # support the use case of AMIs built outside of these modules that depend on
+ # IMDSv1.
use_imdsv1 = false
- # When set to true, this will enable kubergrunt based component syncing. This step
- # ensures that the core EKS components that are installed are upgraded to a
- # matching version everytime the cluster's Kubernetes version is updated.
+ # When set to true, this will enable kubergrunt based component syncing. This
+ # step ensures that the core EKS components that are installed are upgraded to
+ # a matching version every time the cluster's Kubernetes version is updated.
use_kubergrunt_sync_components = true
- # EKS clusters use short-lived authentication tokens that can expire in the middle
- # of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based plugin to
- # fetch an up-to-date token. If this variable is set to true, we'll use kubergrunt
- # to fetch the token (in which case, kubergrunt must be installed and on PATH); if
- # this variable is set to false, we'll use the aws CLI to fetch the token (in
- # which case, aws must be installed and on PATH). Note this functionality is only
- # enabled if use_exec_plugin_for_auth is set to true.
+ # EKS clusters use short-lived authentication tokens that can expire in the
+ # middle of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based
+ # plugin to fetch an up-to-date token. If this variable is set to true, we'll
+ # use kubergrunt to fetch the token (in which case, kubergrunt must be
+ # installed and on PATH); if this variable is set to false, we'll use the aws
+ # CLI to fetch the token (in which case, aws must be installed and on PATH).
+ # Note this functionality is only enabled if use_exec_plugin_for_auth is set
+ # to true.
use_kubergrunt_to_fetch_token = true
# When set to true, this will enable kubergrunt verification to wait for the
- # Kubernetes API server to come up before completing. If false, reverts to a 30
- # second timed wait instead.
+ # Kubernetes API server to come up before completing. If false, reverts to a
+ # 30 second timed wait instead.
use_kubergrunt_verification = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
- # When set to true, this will enable management of the aws-vpc-cni configuration
- # options using kubergrunt running as a local-exec provisioner. If you set this to
- # false, the vpc_cni_* variables will be ignored.
+ # When set to true, this will enable management of the aws-vpc-cni
+ # configuration options using kubergrunt running as a local-exec provisioner.
+ # If you set this to false, the vpc_cni_* variables will be ignored.
use_vpc_cni_customize_script = true
- # When true, enable prefix delegation mode for the AWS VPC CNI component of the
- # EKS cluster. In prefix delegation mode, each ENI will be allocated 16 IP
- # addresses (/28) instead of 1, allowing you to pack more Pods per node. Note that
- # by default, AWS VPC CNI will always preallocate 1 full prefix - this means that
- # you can potentially take up 32 IP addresses from the VPC network space even if
- # you only have 1 Pod on the node. You can tweak this behavior by configuring the
- # var.vpc_cni_warm_ip_target input variable.
+ # When true, enable prefix delegation mode for the AWS VPC CNI component of
+ # the EKS cluster. In prefix delegation mode, each ENI will be allocated 16 IP
+ # addresses (/28) instead of 1, allowing you to pack more Pods per node. Note
+ # that by default, AWS VPC CNI will always preallocate 1 full prefix - this
+ # means that you can potentially take up 32 IP addresses from the VPC network
+ # space even if you only have 1 Pod on the node. You can tweak this behavior
+ # by configuring the var.vpc_cni_warm_ip_target input variable.
vpc_cni_enable_prefix_delegation = true
- # The minimum number of IP addresses (free and used) each node should start with.
- # When null, defaults to the aws-vpc-cni application setting (currently 16 as of
- # version 1.9.0). For example, if this is set to 25, every node will allocate 2
- # prefixes (32 IP addresses). On the other hand, if this was set to the default
- # value, then each node will allocate only 1 prefix (16 IP addresses).
+ # The minimum number of IP addresses (free and used) each node should start
+ # with. When null, defaults to the aws-vpc-cni application setting (currently
+ # 16 as of version 1.9.0). For example, if this is set to 25, every node will
+ # allocate 2 prefixes (32 IP addresses). On the other hand, if this was set to
+ # the default value, then each node will allocate only 1 prefix (16 IP
+ # addresses).
vpc_cni_minimum_ip_target = null
- # The number of free IP addresses each node should maintain. When null, defaults
- # to the aws-vpc-cni application setting (currently 16 as of version 1.9.0). In
- # prefix delegation mode, determines whether the node will preallocate another
- # full prefix. For example, if this is set to 5 and a node is currently has 9 Pods
- # scheduled, then the node will NOT preallocate a new prefix block of 16 IP
- # addresses. On the other hand, if this was set to the default value, then the
- # node will allocate a new block when the first pod is scheduled.
+ # The number of free IP addresses each node should maintain. When null,
+ # defaults to the aws-vpc-cni application setting (currently 16 as of version
+ # 1.9.0). In prefix delegation mode, determines whether the node will
+ # preallocate another full prefix. For example, if this is set to 5 and a node
+ # currently has 9 Pods scheduled, then the node will NOT preallocate a new
+ # prefix block of 16 IP addresses. On the other hand, if this was set to the
+ # default value, then the node will allocate a new block when the first pod is
+ # scheduled.
vpc_cni_warm_ip_target = null
# The ID (ARN, alias ARN, AWS ID) of a customer managed KMS Key to use for
@@ -1404,20 +1424,19 @@ inputs = {
# var.enable_worker_cloudwatch_log_aggregation is true.
worker_cloudwatch_log_group_kms_key_id = null
- # Name of the CloudWatch Log Group where worker system logs are reported to. Only
- # used if var.enable_worker_cloudwatch_log_aggregation is true.
+ # Name of the CloudWatch Log Group where worker system logs are reported to.
+ # Only used if var.enable_worker_cloudwatch_log_aggregation is true.
worker_cloudwatch_log_group_name = null
# The number of days to retain log events in the worker system logs log group.
# Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever. Only used if
- # var.enable_worker_cloudwatch_log_aggregation is true.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
+ # Only used if var.enable_worker_cloudwatch_log_aggregation is true.
worker_cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the worker system logs CloudWatch Log Group, encoded as a map
- # where the keys are tag keys and values are tag values. Only used if
+ # Tags to apply on the worker system logs CloudWatch Log Group, encoded as a
+ # map where the keys are tag keys and values are tag values. Only used if
# var.enable_worker_cloudwatch_log_aggregation is true.
worker_cloudwatch_log_group_tags = null
@@ -1425,14 +1444,15 @@ inputs = {
# mapped as Kubernetes Nodes.
worker_iam_role_arns_for_k8s_role_mapping = []
- # Prefix EKS worker resource names with this string. When you have multiple worker
- # groups for the cluster, you can use this to namespace the resources. Defaults to
- # empty string so that resource names are not excessively long by default.
+ # Prefix EKS worker resource names with this string. When you have multiple
+ # worker groups for the cluster, you can use this to namespace the resources.
+ # Defaults to empty string so that resource names are not excessively long by
+ # default.
worker_name_prefix = ""
- # A list of the subnets into which the EKS Cluster's administrative pods will be
- # launched. These should usually be all private subnets and include one in each
- # AWS Availability Zone. Required when
+ # A list of the subnets into which the EKS Cluster's administrative pods will
+ # be launched. These should usually be all private subnets and include one in
+ # each AWS Availability Zone. Required when
# var.schedule_control_plane_services_on_fargate is true.
worker_vpc_subnet_ids = []
@@ -3430,11 +3450,11 @@ The ID of the AWS Security Group associated with the self-managed EKS workers.
diff --git a/docs/reference/services/app-orchestration/auto-scaling-group-asg.md b/docs/reference/services/app-orchestration/auto-scaling-group-asg.md
index 197f081bfc..fcacf0978b 100644
--- a/docs/reference/services/app-orchestration/auto-scaling-group-asg.md
+++ b/docs/reference/services/app-orchestration/auto-scaling-group-asg.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Auto Scaling Group
-View Source
+View Source
Release Notes
@@ -55,7 +55,7 @@ access to this repo, email .
* [ASG Documentation](https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html):
Amazon’s docs for ASG that cover core concepts such as launch templates and auto scaling groups.
-* [User Data](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/services/asg-service/core-concepts.md)
+* [User Data](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/services/asg-service/core-concepts.md)
## Deploy
@@ -63,7 +63,7 @@ access to this repo, email .
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -71,7 +71,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -90,21 +90,21 @@ If you want to deploy this repo in production, check out the following resources
module "asg_service" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/asg-service?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/asg-service?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
# The ID of the AMI to run on each instance in the ASG. The AMI needs to have
- # `ec2-baseline` installed, since by default it will run `start_ec2_baseline` on
- # the User Data.
+ # `ec2-baseline` installed, since by default it will run `start_ec2_baseline`
+ # on the User Data.
ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with the
- # Bastion Host. You can build the AMI using the Packer template bastion-host.json.
- # Only used if var.ami is null. One of var.ami or var.ami_filters is required. Set
- # to null if passing the ami ID directly.
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # the Bastion Host. You can build the AMI using the Packer template
+ # bastion-host.json. Only used if var.ami is null. One of var.ami or
+ # var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
- # Wait for this number of EC2 Instances to show up healthy in the load balancer on
- # creation.
+ # Wait for this number of EC2 Instances to show up healthy in the load
+ # balancer on creation.
min_elb_capacity =
# The minimum number of EC2 Instances to run in this ASG
@@ -140,16 +140,16 @@ module "asg_service" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of SNS topic ARNs to notify when the health check changes to ALARM, OK,
- # or INSUFFICIENT_DATA state. Note: these SNS topics MUST be in us-east-1! This is
- # because Route 53 only sends CloudWatch metrics to us-east-1, so we must create
- # the alarm in that region, and therefore, can only notify SNS topics in that
- # region.
+ # A list of SNS topic ARNs to notify when the health check changes to ALARM,
+ # OK, or INSUFFICIENT_DATA state. Note: these SNS topics MUST be in us-east-1!
+ # This is because Route 53 only sends CloudWatch metrics to us-east-1, so we
+ # must create the alarm in that region, and therefore, can only notify SNS
+ # topics in that region.
alarm_sns_topic_arns_us_east_1 = []
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications. Also used for the alarms if the Jenkins
- # backup job fails.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications. Also used for the alarms if the
+ # Jenkins backup job fails.
alarms_sns_topic_arn = []
# The CIDR blocks from which to allow access to the ports in var.server_ports
@@ -165,8 +165,9 @@ module "asg_service" {
# The security group IDs from which to allow SSH access
allow_ssh_security_group_ids = []
- # Cloud init scripts to run on the ASG instances during boot. See the part blocks
- # in https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
+ # Cloud init scripts to run on the ASG instances during boot. See the part
+ # blocks in
+ # https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
# syntax
cloud_init_parts = {}
@@ -175,103 +176,103 @@ module "asg_service" {
cloudwatch_log_group_kms_key_id = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values.
cloudwatch_log_group_tags = null
# Set to true to create a DNS A record in Route 53 for this service.
create_route53_entry = false
- # A list of custom tags to apply to the EC2 Instances in this ASG. Each item in
- # this list should be a map with the parameters key, value, and
+ # A list of custom tags to apply to the EC2 Instances in this ASG. Each item
+ # in this list should be a map with the parameters key, value, and
# propagate_at_launch.
custom_tags = []
# The ARN of the Target Group to which to route traffic.
default_forward_target_group_arns = []
- # The default OS user for the service AMI. For example, for AWS Ubuntu AMIs, the
- # default OS user is 'ubuntu'.
+ # The default OS user for the service AMI. For example, for AWS Ubuntu AMIs,
+ # the default OS user is 'ubuntu'.
default_user = "ubuntu"
- # The desired number of EC2 Instances to run in the ASG initially. Note that auto
- # scaling policies may change this value. If you're using auto scaling policies to
- # dynamically resize the cluster, you should actually leave this value as null.
+ # The desired number of EC2 Instances to run in the ASG initially. Note that
+ # auto scaling policies may change this value. If you're using auto scaling
+ # policies to dynamically resize the cluster, you should actually leave this
+ # value as null.
desired_capacity = null
- # The domain name to register in var.hosted_zone_id (e.g. foo.example.com). Only
- # used if var.create_route53_entry is true.
+ # The domain name to register in var.hosted_zone_id (e.g. foo.example.com).
+ # Only used if var.create_route53_entry is true.
domain_name = null
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
- # Set to true to add AIM permissions to send logs to CloudWatch. This is useful in
- # combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch.
+ # Set to true to add IAM permissions to send logs to CloudWatch. This is
+ # useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch.
enable_cloudwatch_log_aggregation = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your Auto
- # Scaling Group
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your Auto Scaling Group
enable_cloudwatch_metrics = true
# Enable fail2ban to block brute force log in attempts. Defaults to true
enable_fail2ban = true
- # Enable ip-lockdown to block access to the instance metadata. Defaults to true
+ # Enable ip-lockdown to block access to the instance metadata. Defaults to
+ # true
enable_ip_lockdown = true
# If set to true, use Route 53 to perform health checks on var.domain_name.
enable_route53_health_check = false
- # A list of metrics the ASG should enable for monitoring all instances in a group.
- # The allowed values are GroupMinSize, GroupMaxSize, GroupDesiredCapacity,
- # GroupInServiceInstances, GroupPendingInstances, GroupStandbyInstances,
- # GroupTerminatingInstances, GroupTotalInstances.
+ # A list of metrics the ASG should enable for monitoring all instances in a
+ # group. The allowed values are GroupMinSize, GroupMaxSize,
+ # GroupDesiredCapacity, GroupInServiceInstances, GroupPendingInstances,
+ # GroupStandbyInstances, GroupTerminatingInstances, GroupTotalInstances.
enabled_metrics = []
- # Since our IAM users are defined in a separate AWS account, this variable is used
- # to specify the ARN of an IAM role that allows ssh-grunt to retrieve IAM group
- # and public SSH key info from that account.
+ # Since our IAM users are defined in a separate AWS account, this variable is
+ # used to specify the ARN of an IAM role that allows ssh-grunt to retrieve IAM
+ # group and public SSH key info from that account.
external_account_ssh_grunt_role_arn = ""
- # Listener rules for a fixed-response action. See comments below for information
- # about the parameters.
+ # Listener rules for a fixed-response action. See comments below for
+ # information about the parameters.
fixed_response_listener_rules = {}
- # Listener rules for a forward action that distributes requests among one or more
- # target groups. By default, sends traffic to the target groups created for the
- # ports in var.server_ports. See comments below for information about the
- # parameters.
+ # Listener rules for a forward action that distributes requests among one or
+ # more target groups. By default, sends traffic to the target groups created
+ # for the ports in var.server_ports. See comments below for information about
+ # the parameters.
forward_listener_rules = {}
# Time, in seconds, after an EC2 Instance comes into service before checking
# health.
health_check_grace_period = 300
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_asg_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_asg_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -282,40 +283,40 @@ module "asg_service" {
# percentage above this threshold.
high_asg_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_asg_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_asg_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_memory_utilization_treat_missing_data = "missing"
# The ID of the Route 53 Hosted Zone in which to create a DNS A record for the
# Auto Scaling Group. Optional if create_route53_entry = false.
hosted_zone_id = null
- # An object defining the policy to attach to `iam_role_name` if the IAM role is
- # going to be created. Accepts a map of objects, where the map keys are sids for
- # IAM policy statements, and the object fields are the resources, actions, and the
- # effect ("Allow" or "Deny") of the statement. Ignored if `iam_role_arn` is
- # provided. Leave as null if you do not wish to use IAM role with Service
- # Accounts.
+ # An object defining the policy to attach to `iam_role_name` if the IAM role
+ # is going to be created. Accepts a map of objects, where the map keys are
+ # sids for IAM policy statements, and the object fields are the resources,
+ # actions, and the effect ("Allow" or "Deny") of the statement. Ignored if
+ # `iam_role_arn` is provided. Leave as null if you do not wish to use IAM role
+ # with Service Accounts.
iam_policy = null
- # The name of a Key Pair that can be used to SSH to the EC2 Instances in the ASG.
- # Set to null if you don't want to enable Key Pair auth.
+ # The name of a Key Pair that can be used to SSH to the EC2 Instances in the
+ # ASG. Set to null if you don't want to enable Key Pair auth.
key_pair_name = null
# The ID of the Route 53 Hosted Zone in which to create a DNS A record for the
@@ -333,98 +334,101 @@ module "asg_service" {
# you're using the Application Load Balancer (ALB), see var.target_group_arns.
load_balancers = []
- # List of users on the ASG EC2 instances that should be permitted access to the
- # EC2 metadata.
+ # List of users on the ASG EC2 instances that should be permitted access to
+ # the EC2 metadata.
metadata_users = []
# The DNS name that was assigned by AWS to the load balancer upon creation
original_lb_dns_name = null
- # Listener rules for a redirect action. See comments below for information about
- # the parameters.
+ # Listener rules for a redirect action. See comments below for information
+ # about the parameters.
redirect_listener_rules = {}
- # The optional external_id to be used in the us-east-1 provider block defined in
- # the route53-health-check-alarms module. This module configures its own AWS
- # provider to ensure resources are created in us-east-1.
+ # The optional external_id to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_external_id = null
- # The optional AWS profile to be used in the us-east-1 provider block defined in
- # the route53-health-check-alarms module. This module configures its own AWS
- # provider to ensure resources are created in us-east-1.
+ # The optional AWS profile to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_profile = null
- # The optional role_arn to be used in the us-east-1 provider block defined in the
- # route53-health-check-alarms module. This module configures its own AWS provider
- # to ensure resources are created in us-east-1.
- route53_health_check_provider_role_arn = null
-
- # The optional session_name to be used in the us-east-1 provider block defined in
+ # The optional role_arn to be used in the us-east-1 provider block defined in
# the route53-health-check-alarms module. This module configures its own AWS
# provider to ensure resources are created in us-east-1.
+ route53_health_check_provider_role_arn = null
+
+ # The optional session_name to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_session_name = null
# The optional path to a credentials file used in the us-east-1 provider block
- # defined in the route53-health-check-alarms module. This module configures its
- # own AWS provider to ensure resources are created in us-east-1.
+ # defined in the route53-health-check-alarms module. This module configures
+ # its own AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_shared_credentials_file = null
- # A list of ARNs of Secrets Manager secrets that the task should have permissions
- # to read. The IAM role for the task will be granted
+ # A list of ARNs of Secrets Manager secrets that the task should have
+ # permissions to read. The IAM role for the task will be granted
# `secretsmanager:GetSecretValue` for each secret in the list. The ARN can be
# either the complete ARN, including the randomly generated suffix, or the ARN
# without the suffix. If the latter, the module will look up the full ARN
- # automatically. This is helpful in cases where you don't yet know the randomly
- # generated suffix because the rest of the ARN is a predictable value.
+ # automatically. This is helpful in cases where you don't yet know the
+ # randomly generated suffix because the rest of the ARN is a predictable
+ # value.
secrets_access = []
# The ports the EC2 instances listen on for requests. A Target Group will be
- # created for each port and any rules specified in var.forward_rules will forward
- # traffic to these Target Groups.
+ # created for each port and any rules specified in var.forward_rules will
+ # forward traffic to these Target Groups.
server_ports = {}
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the EC2 instances. This is useful if you wish to customize the CloudWatch Log
- # Group with various settings such as retention periods and KMS encryption. When
- # false, the CloudWatch agent will automatically create a basic log group to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the EC2 instances. This is useful if you wish to customize the
+ # CloudWatch Log Group with various settings such as retention periods and KMS
+ # encryption. When false, the CloudWatch agent will automatically create a
+ # basic log group to use.
should_create_cloudwatch_log_group = true
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the instances. To omit this variable, set it to an
- # empty string (do NOT use null, or Terraform will complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the instances. To omit this variable, set it
+ # to an empty string (do NOT use null, or Terraform will complain).
ssh_grunt_iam_group = "ssh-grunt-sudo-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the instances with sudo permissions. To omit this
- # variable, set it to an empty string (do NOT use null, or Terraform will
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the instances with sudo permissions. To omit
+ # this variable, set it to an empty string (do NOT use null, or Terraform will
# complain).
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
- # The port at which SSH will be allowed from var.allow_ssh_from_cidr_blocks and
- # var.allow_ssh_security_group_ids
+ # The port at which SSH will be allowed from var.allow_ssh_from_cidr_blocks
+ # and var.allow_ssh_security_group_ids
ssh_port = 22
- # The key for the tag that will be used to associate a unique identifier with this
- # ASG. This identifier will persist between redeploys of the ASG, even though the
- # underlying ASG is being deleted and replaced with a different one.
+ # The key for the tag that will be used to associate a unique identifier with
+ # this ASG. This identifier will persist between redeploys of the ASG, even
+ # though the underlying ASG is being deleted and replaced with a different
+ # one.
tag_asg_id_key = "AsgId"
- # A list of policies to decide how the instances in the auto scale group should be
- # terminated. The allowed values are OldestInstance, NewestInstance,
+ # A list of policies to decide how the instances in the auto scale group
+ # should be terminated. The allowed values are OldestInstance, NewestInstance,
# OldestLaunchConfiguration, ClosestToNextInstanceHour, Default.
termination_policies = []
- # Whether or not ELB or ALB health checks should be enabled. If set to true, the
- # load_balancers or target_groups_arns variable should be set depending on the
- # load balancer type you are using. Useful for testing connectivity before health
- # check endpoints are available.
+ # Whether or not ELB or ALB health checks should be enabled. If set to true,
+ # the load_balancers or target_groups_arns variable should be set depending on
+ # the load balancer type you are using. Useful for testing connectivity before
+ # health check endpoints are available.
use_elb_health_checks = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# A maximum duration that Terraform should wait for the EC2 Instances to be
@@ -446,7 +450,7 @@ module "asg_service" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/asg-service?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/asg-service?ref=v0.104.12"
}
inputs = {
@@ -456,14 +460,14 @@ inputs = {
# ----------------------------------------------------------------------------------------------------
# The ID of the AMI to run on each instance in the ASG. The AMI needs to have
- # `ec2-baseline` installed, since by default it will run `start_ec2_baseline` on
- # the User Data.
+ # `ec2-baseline` installed, since by default it will run `start_ec2_baseline`
+ # on the User Data.
ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with the
- # Bastion Host. You can build the AMI using the Packer template bastion-host.json.
- # Only used if var.ami is null. One of var.ami or var.ami_filters is required. Set
- # to null if passing the ami ID directly.
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # the Bastion Host. You can build the AMI using the Packer template
+ # bastion-host.json. Only used if var.ami is null. One of var.ami or
+ # var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
- # Wait for this number of EC2 Instances to show up healthy in the load balancer on
- # creation.
+ # Wait for this number of EC2 Instances to show up healthy in the load
+ # balancer on creation.
min_elb_capacity =
# The minimum number of EC2 Instances to run in this ASG
@@ -499,16 +503,16 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of SNS topic ARNs to notify when the health check changes to ALARM, OK,
- # or INSUFFICIENT_DATA state. Note: these SNS topics MUST be in us-east-1! This is
- # because Route 53 only sends CloudWatch metrics to us-east-1, so we must create
- # the alarm in that region, and therefore, can only notify SNS topics in that
- # region.
+ # A list of SNS topic ARNs to notify when the health check changes to ALARM,
+ # OK, or INSUFFICIENT_DATA state. Note: these SNS topics MUST be in us-east-1!
+ # This is because Route 53 only sends CloudWatch metrics to us-east-1, so we
+ # must create the alarm in that region, and therefore, can only notify SNS
+ # topics in that region.
alarm_sns_topic_arns_us_east_1 = []
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications. Also used for the alarms if the Jenkins
- # backup job fails.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications. Also used for the alarms if the
+ # Jenkins backup job fails.
alarms_sns_topic_arn = []
# The CIDR blocks from which to allow access to the ports in var.server_ports
@@ -524,8 +528,9 @@ inputs = {
# The security group IDs from which to allow SSH access
allow_ssh_security_group_ids = []
- # Cloud init scripts to run on the ASG instances during boot. See the part blocks
- # in https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
+ # Cloud init scripts to run on the ASG instances during boot. See the part
+ # blocks in
+ # https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
# syntax
cloud_init_parts = {}
@@ -534,103 +539,103 @@ inputs = {
cloudwatch_log_group_kms_key_id = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values.
cloudwatch_log_group_tags = null
# Set to true to create a DNS A record in Route 53 for this service.
create_route53_entry = false
- # A list of custom tags to apply to the EC2 Instances in this ASG. Each item in
- # this list should be a map with the parameters key, value, and
+ # A list of custom tags to apply to the EC2 Instances in this ASG. Each item
+ # in this list should be a map with the parameters key, value, and
# propagate_at_launch.
custom_tags = []
# The ARN of the Target Group to which to route traffic.
default_forward_target_group_arns = []
- # The default OS user for the service AMI. For example, for AWS Ubuntu AMIs, the
- # default OS user is 'ubuntu'.
+ # The default OS user for the service AMI. For example, for AWS Ubuntu AMIs,
+ # the default OS user is 'ubuntu'.
default_user = "ubuntu"
- # The desired number of EC2 Instances to run in the ASG initially. Note that auto
- # scaling policies may change this value. If you're using auto scaling policies to
- # dynamically resize the cluster, you should actually leave this value as null.
+ # The desired number of EC2 Instances to run in the ASG initially. Note that
+ # auto scaling policies may change this value. If you're using auto scaling
+ # policies to dynamically resize the cluster, you should actually leave this
+ # value as null.
desired_capacity = null
- # The domain name to register in var.hosted_zone_id (e.g. foo.example.com). Only
- # used if var.create_route53_entry is true.
+ # The domain name to register in var.hosted_zone_id (e.g. foo.example.com).
+ # Only used if var.create_route53_entry is true.
domain_name = null
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
- # Set to true to add AIM permissions to send logs to CloudWatch. This is useful in
- # combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch.
+ # Set to true to add IAM permissions to send logs to CloudWatch. This is
+ # useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch.
enable_cloudwatch_log_aggregation = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your Auto
- # Scaling Group
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your Auto Scaling Group
enable_cloudwatch_metrics = true
# Enable fail2ban to block brute force log in attempts. Defaults to true
enable_fail2ban = true
- # Enable ip-lockdown to block access to the instance metadata. Defaults to true
+ # Enable ip-lockdown to block access to the instance metadata. Defaults to
+ # true
enable_ip_lockdown = true
# If set to true, use Route 53 to perform health checks on var.domain_name.
enable_route53_health_check = false
- # A list of metrics the ASG should enable for monitoring all instances in a group.
- # The allowed values are GroupMinSize, GroupMaxSize, GroupDesiredCapacity,
- # GroupInServiceInstances, GroupPendingInstances, GroupStandbyInstances,
- # GroupTerminatingInstances, GroupTotalInstances.
+ # A list of metrics the ASG should enable for monitoring all instances in a
+ # group. The allowed values are GroupMinSize, GroupMaxSize,
+ # GroupDesiredCapacity, GroupInServiceInstances, GroupPendingInstances,
+ # GroupStandbyInstances, GroupTerminatingInstances, GroupTotalInstances.
enabled_metrics = []
- # Since our IAM users are defined in a separate AWS account, this variable is used
- # to specify the ARN of an IAM role that allows ssh-grunt to retrieve IAM group
- # and public SSH key info from that account.
+ # Since our IAM users are defined in a separate AWS account, this variable is
+ # used to specify the ARN of an IAM role that allows ssh-grunt to retrieve IAM
+ # group and public SSH key info from that account.
external_account_ssh_grunt_role_arn = ""
- # Listener rules for a fixed-response action. See comments below for information
- # about the parameters.
+ # Listener rules for a fixed-response action. See comments below for
+ # information about the parameters.
fixed_response_listener_rules = {}
- # Listener rules for a forward action that distributes requests among one or more
- # target groups. By default, sends traffic to the target groups created for the
- # ports in var.server_ports. See comments below for information about the
- # parameters.
+ # Listener rules for a forward action that distributes requests among one or
+ # more target groups. By default, sends traffic to the target groups created
+ # for the ports in var.server_ports. See comments below for information about
+ # the parameters.
forward_listener_rules = {}
# Time, in seconds, after an EC2 Instance comes into service before checking
# health.
health_check_grace_period = 300
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_asg_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_asg_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -641,40 +646,40 @@ inputs = {
# percentage above this threshold.
high_asg_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_asg_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_asg_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_memory_utilization_treat_missing_data = "missing"
# The ID of the Route 53 Hosted Zone in which to create a DNS A record for the
# Auto Scaling Group. Optional if create_route53_entry = false.
hosted_zone_id = null
- # An object defining the policy to attach to `iam_role_name` if the IAM role is
- # going to be created. Accepts a map of objects, where the map keys are sids for
- # IAM policy statements, and the object fields are the resources, actions, and the
- # effect ("Allow" or "Deny") of the statement. Ignored if `iam_role_arn` is
- # provided. Leave as null if you do not wish to use IAM role with Service
- # Accounts.
+ # An object defining the policy to attach to `iam_role_name` if the IAM role
+ # is going to be created. Accepts a map of objects, where the map keys are
+ # sids for IAM policy statements, and the object fields are the resources,
+ # actions, and the effect ("Allow" or "Deny") of the statement. Ignored if
+ # `iam_role_arn` is provided. Leave as null if you do not wish to use IAM role
+ # with Service Accounts.
iam_policy = null
- # The name of a Key Pair that can be used to SSH to the EC2 Instances in the ASG.
- # Set to null if you don't want to enable Key Pair auth.
+ # The name of a Key Pair that can be used to SSH to the EC2 Instances in the
+ # ASG. Set to null if you don't want to enable Key Pair auth.
key_pair_name = null
# The ID of the Route 53 Hosted Zone in which to create a DNS A record for the
@@ -692,98 +697,101 @@ inputs = {
# you're using the Application Load Balancer (ALB), see var.target_group_arns.
load_balancers = []
- # List of users on the ASG EC2 instances that should be permitted access to the
- # EC2 metadata.
+ # List of users on the ASG EC2 instances that should be permitted access to
+ # the EC2 metadata.
metadata_users = []
# The DNS name that was assigned by AWS to the load balancer upon creation
original_lb_dns_name = null
- # Listener rules for a redirect action. See comments below for information about
- # the parameters.
+ # Listener rules for a redirect action. See comments below for information
+ # about the parameters.
redirect_listener_rules = {}
- # The optional external_id to be used in the us-east-1 provider block defined in
- # the route53-health-check-alarms module. This module configures its own AWS
- # provider to ensure resources are created in us-east-1.
+ # The optional external_id to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_external_id = null
- # The optional AWS profile to be used in the us-east-1 provider block defined in
- # the route53-health-check-alarms module. This module configures its own AWS
- # provider to ensure resources are created in us-east-1.
+ # The optional AWS profile to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_profile = null
- # The optional role_arn to be used in the us-east-1 provider block defined in the
- # route53-health-check-alarms module. This module configures its own AWS provider
- # to ensure resources are created in us-east-1.
- route53_health_check_provider_role_arn = null
-
- # The optional session_name to be used in the us-east-1 provider block defined in
+ # The optional role_arn to be used in the us-east-1 provider block defined in
# the route53-health-check-alarms module. This module configures its own AWS
# provider to ensure resources are created in us-east-1.
+ route53_health_check_provider_role_arn = null
+
+ # The optional session_name to be used in the us-east-1 provider block defined
+ # in the route53-health-check-alarms module. This module configures its own
+ # AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_session_name = null
# The optional path to a credentials file used in the us-east-1 provider block
- # defined in the route53-health-check-alarms module. This module configures its
- # own AWS provider to ensure resources are created in us-east-1.
+ # defined in the route53-health-check-alarms module. This module configures
+ # its own AWS provider to ensure resources are created in us-east-1.
route53_health_check_provider_shared_credentials_file = null
- # A list of ARNs of Secrets Manager secrets that the task should have permissions
- # to read. The IAM role for the task will be granted
+ # A list of ARNs of Secrets Manager secrets that the task should have
+ # permissions to read. The IAM role for the task will be granted
# `secretsmanager:GetSecretValue` for each secret in the list. The ARN can be
# either the complete ARN, including the randomly generated suffix, or the ARN
# without the suffix. If the latter, the module will look up the full ARN
- # automatically. This is helpful in cases where you don't yet know the randomly
- # generated suffix because the rest of the ARN is a predictable value.
+ # automatically. This is helpful in cases where you don't yet know the
+ # randomly generated suffix because the rest of the ARN is a predictable
+ # value.
secrets_access = []
# The ports the EC2 instances listen on for requests. A Target Group will be
- # created for each port and any rules specified in var.forward_rules will forward
- # traffic to these Target Groups.
+ # created for each port and any rules specified in var.forward_rules will
+ # forward traffic to these Target Groups.
server_ports = {}
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the EC2 instances. This is useful if you wish to customize the CloudWatch Log
- # Group with various settings such as retention periods and KMS encryption. When
- # false, the CloudWatch agent will automatically create a basic log group to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the EC2 instances. This is useful if you wish to customize the
+ # CloudWatch Log Group with various settings such as retention periods and KMS
+ # encryption. When false, the CloudWatch agent will automatically create a
+ # basic log group to use.
should_create_cloudwatch_log_group = true
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the instances. To omit this variable, set it to an
- # empty string (do NOT use null, or Terraform will complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the instances. To omit this variable, set it
+ # to an empty string (do NOT use null, or Terraform will complain).
ssh_grunt_iam_group = "ssh-grunt-sudo-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to the instances with sudo permissions. To omit this
- # variable, set it to an empty string (do NOT use null, or Terraform will
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to the instances with sudo permissions. To omit
+ # this variable, set it to an empty string (do NOT use null, or Terraform will
# complain).
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
- # The port at which SSH will be allowed from var.allow_ssh_from_cidr_blocks and
- # var.allow_ssh_security_group_ids
+ # The port at which SSH will be allowed from var.allow_ssh_from_cidr_blocks
+ # and var.allow_ssh_security_group_ids
ssh_port = 22
- # The key for the tag that will be used to associate a unique identifier with this
- # ASG. This identifier will persist between redeploys of the ASG, even though the
- # underlying ASG is being deleted and replaced with a different one.
+ # The key for the tag that will be used to associate a unique identifier with
+ # this ASG. This identifier will persist between redeploys of the ASG, even
+ # though the underlying ASG is being deleted and replaced with a different
+ # one.
tag_asg_id_key = "AsgId"
- # A list of policies to decide how the instances in the auto scale group should be
- # terminated. The allowed values are OldestInstance, NewestInstance,
+ # A list of policies to decide how the instances in the auto scale group
+ # should be terminated. The allowed values are OldestInstance, NewestInstance,
# OldestLaunchConfiguration, ClosestToNextInstanceHour, Default.
termination_policies = []
- # Whether or not ELB or ALB health checks should be enabled. If set to true, the
- # load_balancers or target_groups_arns variable should be set depending on the
- # load balancer type you are using. Useful for testing connectivity before health
- # check endpoints are available.
+ # Whether or not ELB or ALB health checks should be enabled. If set to true,
+ # the load_balancers or target_groups_arns variable should be set depending on
+ # the load balancer type you are using. Useful for testing connectivity before
+ # health check endpoints are available.
use_elb_health_checks = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# A maximum duration that Terraform should wait for the EC2 Instances to be
@@ -2029,11 +2037,11 @@ The ID of the Security Group that belongs to the ASG.
diff --git a/docs/reference/services/app-orchestration/ec-2-instance.md b/docs/reference/services/app-orchestration/ec-2-instance.md
index 9ae37f175c..9be5134e75 100644
--- a/docs/reference/services/app-orchestration/ec-2-instance.md
+++ b/docs/reference/services/app-orchestration/ec-2-instance.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# EC2 Instance
-View Source
+View Source
Release Notes
@@ -58,9 +58,9 @@ If you’ve never used the Service Catalog before, make sure to read
### Core concepts
-* [How do I update my instance?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/services/ec2-instance/core-concepts.md#how-do-i-update-my-instance)
-* [How do I use User Data?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/services/ec2-instance/core-concepts.md#how-do-i-use-user-data)
-* [How do I mount an EBS volume?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/services/ec2-instance/core-concepts.md#how-do-i-mount-an-ebs-volume)
+* [How do I update my instance?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/services/ec2-instance/core-concepts.md#how-do-i-update-my-instance)
+* [How do I use User Data?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/services/ec2-instance/core-concepts.md#how-do-i-use-user-data)
+* [How do I mount an EBS volume?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/services/ec2-instance/core-concepts.md#how-do-i-mount-an-ebs-volume)
### The EC2 Instance AMI
@@ -85,7 +85,7 @@ This template configures the AMI to:
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The `examples/for-learning-and-testing`
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The `examples/for-learning-and-testing`
folder contains standalone sample code optimized for learning, experimenting, and testing (but not direct
production usage).
@@ -93,7 +93,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog, configure CI / CD for your apps and
@@ -113,7 +113,7 @@ If you want to deploy this repo in production, check out the following resources
module "ec_2_instance" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ec2-instance?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ec2-instance?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -127,7 +127,8 @@ module "ec_2_instance" {
cidr_blocks = list(string)
))>
- # Accept inbound traffic on these port ranges from the specified security groups
+ # Accept inbound traffic on these port ranges from the specified security
+ # groups
allow_port_from_security_group_ids =
# The AMI to run on the EC2 instance. This should be built from the Packer
- # template under ec2-instance.json. One of var.ami or var.ami_filters is required.
- # Set to null if looking up the ami with filters.
+ # template under ec2-instance.json. One of var.ami or var.ami_filters is
+ # required. Set to null if looking up the ami with filters.
ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with the
- # EC2 instance. You can build the AMI using the Packer template ec2-instance.json.
- # Only used if var.ami is null. One of var.ami or var.ami_filters is required. Set
- # to null if passing the ami ID directly.
+ # Properties on the AMI that can be used to look up a prebuilt AMI for use with
+ # the EC2 instance. You can build the AMI using the Packer template
+ # ec2-instance.json. Only used if var.ami is null. One of var.ami or
+ # var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
- # The name of the EC2 instance and the other resources created by these templates
+ # The name of the EC2 instance and the other resources created by these
+ # templates
name =
- # The domain name to use to look up the Route 53 hosted zone. Will be a subset of
- # fully_qualified_domain_name: e.g., my-company.com. Only one of
+ # The domain name to use to look up the Route 53 hosted zone. Will be a subset
+ # of fully_qualified_domain_name: e.g., my-company.com. Only one of
# route53_lookup_domain_name or route53_zone_id should be used.
route53_lookup_domain_name =
# The ID of the hosted zone to use. Allows specifying the hosted zone directly
- # instead of looking it up via domain name. Only one of route53_lookup_domain_name
- # or route53_zone_id should be used.
+ # instead of looking it up via domain name. Only one of
+ # route53_lookup_domain_name or route53_zone_id should be used.
route53_zone_id =
- # The ID of the subnet in which to deploy the EC2 instance. Must be a subnet in
- # var.vpc_id.
+ # The ID of the subnet in which to deploy the EC2 instance. Must be a subnet
+ # in var.vpc_id.
subnet_id =
# The ID of the VPC in which to deploy the EC2 instance.
@@ -192,18 +194,20 @@ module "ec_2_instance" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of optional additional security group ids to assign to the EC2 instance.
+ # A list of optional additional security group ids to assign to the EC2
+ # instance.
additional_security_group_ids = []
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arn = []
# Determines if an Elastic IP (EIP) will be created for this instance.
attach_eip = true
# Tags to use to filter the Route 53 Hosted Zones that might match the hosted
- # zone's name (use if you have multiple public hosted zones with the same name)
+ # zone's name (use if you have multiple public hosted zones with the same
+ # name)
base_domain_name_tags = {}
# Cloud init scripts to run on the EC2 instance while it boots. See the part
@@ -217,38 +221,37 @@ module "ec_2_instance" {
cloudwatch_log_group_kms_key_id = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values.
cloudwatch_log_group_tags = null
- # Set to true to create a DNS record in Route53 pointing to the EC2 instance. If
- # true, be sure to set var.fully_qualified_domain_name.
+ # Set to true to create a DNS record in Route53 pointing to the EC2 instance.
+ # If true, be sure to set var.fully_qualified_domain_name.
create_dns_record = true
- # When true, this module will create a new IAM role to bind to the EC2 instance.
- # Set to false if you wish to use a preexisting IAM role. By default, this module
- # will create an instance profile to pass this IAM role to the EC2 instance.
- # Preexisting IAM roles created through the AWS console instead of programatically
- # (e.g. withTerraform) will automatically create an instance profile with the same
- # name. In that case, set create_instance_profile to false to avoid errors during
- # Terraform apply.
+ # When true, this module will create a new IAM role to bind to the EC2
+ # instance. Set to false if you wish to use a preexisting IAM role. By
+ # default, this module will create an instance profile to pass this IAM role
+ # to the EC2 instance. Preexisting IAM roles created through the AWS console
+ # instead of programmatically (e.g. with Terraform) will automatically create an
+ # instance profile with the same name. In that case, set
+ # create_instance_profile to false to avoid errors during Terraform apply.
create_iam_role = true
# When true, this module will create an instance profile to pass the IAM role,
# either the one created by this module or one passed externally, to the EC2
- # instance. Set to false if you wish to use a preexisting instance profile. For
- # more information see
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_in
- # tance-profiles.html.
+ # instance. Set to false if you wish to use a preexisting instance profile.
+ # For more information see
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html.
create_instance_profile = true
- # The default OS user for the EC2 instance AMI. For AWS Ubuntu AMIs, which is what
- # the Packer template in ec2-instance.json uses, the default OS user is 'ubuntu'.
+ # The default OS user for the EC2 instance AMI. For AWS Ubuntu AMIs, which is
+ # what the Packer template in ec2-instance.json uses, the default OS user is
+ # 'ubuntu'.
default_user = "ubuntu"
# DNS Time To Live in seconds.
@@ -257,59 +260,59 @@ module "ec_2_instance" {
# If true, the launched EC2 Instance will be EBS-optimized.
ebs_optimized = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Set to true to send logs to CloudWatch. This is useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch.
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch.
enable_cloudwatch_log_aggregation = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/met
- # ics/cloudwatch-memory-disk-metrics-scripts to get memory and disk metrics in
- # CloudWatch for your EC2 instance.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/metrics/cloudwatch-memory-disk-metrics-scripts
+ # to get memory and disk metrics in CloudWatch for your EC2 instance.
enable_cloudwatch_metrics = true
# Enable fail2ban to block brute force log in attempts. Defaults to true.
enable_fail2ban = true
- # Enable ip-lockdown to block access to the instance metadata. Defaults to true.
+ # Enable ip-lockdown to block access to the instance metadata. Defaults to
+ # true.
enable_ip_lockdown = true
# Set to true to add IAM permissions for ssh-grunt
- # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-
- # runt), which will allow you to manage SSH access via IAM groups.
+ # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-grunt),
+ # which will allow you to manage SSH access via IAM groups.
enable_ssh_grunt = true
- # If you are using ssh-grunt and your IAM users / groups are defined in a separate
- # AWS account, you can use this variable to specify the ARN of an IAM role that
- # ssh-grunt can assume to retrieve IAM group and public SSH key info from that
- # account. To omit this variable, set it to an empty string (do NOT use null, or
- # Terraform will complain).
+ # If you are using ssh-grunt and your IAM users / groups are defined in a
+ # separate AWS account, you can use this variable to specify the ARN of an IAM
+ # role that ssh-grunt can assume to retrieve IAM group and public SSH key info
+ # from that account. To omit this variable, set it to an empty string (do NOT
+ # use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The apex domain of the hostname for the EC2 instance (e.g., example.com). The
- # complete hostname for the EC2 instance will be
- # var.name.var.fully_qualified_domain_name (e.g., bastion.example.com). Only used
- # if create_dns_record is true.
+ # The apex domain of the hostname for the EC2 instance (e.g., example.com).
+ # The complete hostname for the EC2 instance will be
+ # var.name.var.fully_qualified_domain_name (e.g., bastion.example.com). Only
+ # used if create_dns_record is true.
fully_qualified_domain_name = ""
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the instance.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the instance.
high_instance_cpu_utilization_period = 60
- # Trigger an alarm if the EC2 instance has a CPU utilization percentage above this
- # threshold.
+ # Trigger an alarm if the EC2 instance has a CPU utilization percentage above
+ # this threshold.
high_instance_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_instance_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -320,54 +323,56 @@ module "ec_2_instance" {
# above this threshold.
high_instance_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_instance_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the instance.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the instance.
high_instance_memory_utilization_period = 60
- # Trigger an alarm if the EC2 instance has a Memory utilization percentage above
- # this threshold.
+ # Trigger an alarm if the EC2 instance has a Memory utilization percentage
+ # above this threshold.
high_instance_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_instance_memory_utilization_treat_missing_data = "missing"
# The name for the bastion host's IAM role and instance profile. If set to an
# empty string, will use var.name. Required when create_iam_role is false.
iam_role_name = ""
- # The name of a Key Pair that can be used to SSH to this instance. This instance
- # may have ssh-grunt installed. The preferred way to do SSH access is with your
- # own IAM user name and SSH key. This Key Pair is only as a fallback.
+ # The name of a Key Pair that can be used to SSH to this instance. This
+ # instance may have ssh-grunt installed. The preferred way to do SSH access is
+ # with your own IAM user name and SSH key. This Key Pair is only used as a
+ # fallback.
keypair_name = null
# Whether the metadata service is available. Valid values include enabled or
# disabled. Defaults to enabled.
metadata_http_endpoint = "enabled"
- # Desired HTTP PUT response hop limit for instance metadata requests. The larger
- # the number, the further instance metadata requests can travel. Valid values are
- # integer from 1 to 64. Defaults to 1.
+ # Desired HTTP PUT response hop limit for instance metadata requests. The
+ # larger the number, the further instance metadata requests can travel. Valid
+ # values are integers from 1 to 64. Defaults to 1.
metadata_http_put_response_hop_limit = 1
- # Whether or not the metadata service requires session tokens, also referred to as
- # Instance Metadata Service Version 2 (IMDSv2). Valid values include optional or
- # required. Defaults to optional.
+ # Whether or not the metadata service requires session tokens, also referred
+ # to as Instance Metadata Service Version 2 (IMDSv2). Valid values include
+ # optional or required. Defaults to optional.
metadata_http_tokens = "optional"
- # Enables or disables access to instance tags from the instance metadata service.
- # Valid values include enabled or disabled. Defaults to disabled.
+ # Enables or disables access to instance tags from the instance metadata
+ # service. Valid values include enabled or disabled. Defaults to disabled.
metadata_tags = "disabled"
- # If set to true, the root volume will be deleted when the Instance is terminated.
+ # If set to true, the root volume will be deleted when the Instance is
+ # terminated.
root_volume_delete_on_termination = true
# The size of the root volume, in gigabytes.
@@ -376,38 +381,39 @@ module "ec_2_instance" {
# The root volume type. Must be one of: standard, gp2, io1.
root_volume_type = "standard"
- # A list of secondary private IPv4 addresses to assign to the instance's primary
- # network interface (eth0) in a VPC
+ # A list of secondary private IPv4 addresses to assign to the instance's
+ # primary network interface (eth0) in a VPC
secondary_private_ips = []
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the EC2 instances. This is useful if you wish to customize the CloudWatch Log
- # Group with various settings such as retention periods and KMS encryption. When
- # false, the CloudWatch agent will automatically create a basic log group to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the EC2 instances. This is useful if you wish to customize the
+ # CloudWatch Log Group with various settings such as retention periods and KMS
+ # encryption. When false, the CloudWatch agent will automatically create a
+ # basic log group to use.
should_create_cloudwatch_log_group = true
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this EC2 instance. To omit this variable, set it to an
- # empty string (do NOT use null, or Terraform will complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this EC2 instance. To omit this variable,
+ # set it to an empty string (do NOT use null, or Terraform will complain).
ssh_grunt_iam_group = ""
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this EC2 instance. To omit this variable, set it to an
- # empty string (do NOT use null, or Terraform will complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this EC2 instance. To omit this variable,
+ # set it to an empty string (do NOT use null, or Terraform will complain).
ssh_grunt_iam_group_sudo = ""
- # A map of tags to apply to the EC2 instance and the S3 Buckets. The key is the
- # tag name and the value is the tag value.
+ # A map of tags to apply to the EC2 instance and the S3 Buckets. The key is
+ # the tag name and the value is the tag value.
tags = {}
# The tenancy of this instance. Must be one of: default, dedicated, or host.
tenancy = "default"
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
}
@@ -425,7 +431,7 @@ module "ec_2_instance" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ec2-instance?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ec2-instance?ref=v0.104.12"
}
inputs = {
@@ -442,7 +448,8 @@ inputs = {
cidr_blocks = list(string)
))>
- # Accept inbound traffic on these port ranges from the specified security groups
+ # Accept inbound traffic on these port ranges from the specified security
+ # groups
allow_port_from_security_group_ids =
# The AMI to run on the EC2 instance. This should be built from the Packer
- # template under ec2-instance.json. One of var.ami or var.ami_filters is required.
- # Set to null if looking up the ami with filters.
+ # template under ec2-instance.json. One of var.ami or var.ami_filters is
+ # required. Set to null if looking up the ami with filters.
ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with the
- # EC2 instance. You can build the AMI using the Packer template ec2-instance.json.
- # Only used if var.ami is null. One of var.ami or var.ami_filters is required. Set
- # to null if passing the ami ID directly.
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # the EC2 instance. You can build the AMI using the Packer template
+ # ec2-instance.json. Only used if var.ami is null. One of var.ami or
+ # var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
- # The name of the EC2 instance and the other resources created by these templates
+ # The name of the EC2 instance and the other resources created by these
+ # templates
name =
- # The domain name to use to look up the Route 53 hosted zone. Will be a subset of
- # fully_qualified_domain_name: e.g., my-company.com. Only one of
+ # The domain name to use to look up the Route 53 hosted zone. Will be a subset
+ # of fully_qualified_domain_name: e.g., my-company.com. Only one of
# route53_lookup_domain_name or route53_zone_id should be used.
route53_lookup_domain_name =
# The ID of the hosted zone to use. Allows specifying the hosted zone directly
- # instead of looking it up via domain name. Only one of route53_lookup_domain_name
- # or route53_zone_id should be used.
+ # instead of looking it up via domain name. Only one of
+ # route53_lookup_domain_name or route53_zone_id should be used.
route53_zone_id =
- # The ID of the subnet in which to deploy the EC2 instance. Must be a subnet in
- # var.vpc_id.
+ # The ID of the subnet in which to deploy the EC2 instance. Must be a subnet
+ # in var.vpc_id.
subnet_id =
# The ID of the VPC in which to deploy the EC2 instance.
@@ -507,18 +515,20 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of optional additional security group ids to assign to the EC2 instance.
+ # A list of optional additional security group ids to assign to the EC2
+ # instance.
additional_security_group_ids = []
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arn = []
# Determines if an Elastic IP (EIP) will be created for this instance.
attach_eip = true
# Tags to use to filter the Route 53 Hosted Zones that might match the hosted
- # zone's name (use if you have multiple public hosted zones with the same name)
+ # zone's name (use if you have multiple public hosted zones with the same
+ # name)
base_domain_name_tags = {}
# Cloud init scripts to run on the EC2 instance while it boots. See the part
@@ -532,38 +542,37 @@ inputs = {
cloudwatch_log_group_kms_key_id = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values.
cloudwatch_log_group_tags = null
- # Set to true to create a DNS record in Route53 pointing to the EC2 instance. If
- # true, be sure to set var.fully_qualified_domain_name.
+ # Set to true to create a DNS record in Route53 pointing to the EC2 instance.
+ # If true, be sure to set var.fully_qualified_domain_name.
create_dns_record = true
- # When true, this module will create a new IAM role to bind to the EC2 instance.
- # Set to false if you wish to use a preexisting IAM role. By default, this module
- # will create an instance profile to pass this IAM role to the EC2 instance.
- # Preexisting IAM roles created through the AWS console instead of programatically
- # (e.g. withTerraform) will automatically create an instance profile with the same
- # name. In that case, set create_instance_profile to false to avoid errors during
- # Terraform apply.
+ # When true, this module will create a new IAM role to bind to the EC2
+ # instance. Set to false if you wish to use a preexisting IAM role. By
+ # default, this module will create an instance profile to pass this IAM role
+ # to the EC2 instance. Preexisting IAM roles created through the AWS console
+ # instead of programmatically (e.g. with Terraform) will automatically create an
+ # instance profile with the same name. In that case, set
+ # create_instance_profile to false to avoid errors during Terraform apply.
create_iam_role = true
# When true, this module will create an instance profile to pass the IAM role,
# either the one created by this module or one passed externally, to the EC2
- # instance. Set to false if you wish to use a preexisting instance profile. For
- # more information see
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_in
- # tance-profiles.html.
+ # instance. Set to false if you wish to use a preexisting instance profile.
+ # For more information see
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html.
create_instance_profile = true
- # The default OS user for the EC2 instance AMI. For AWS Ubuntu AMIs, which is what
- # the Packer template in ec2-instance.json uses, the default OS user is 'ubuntu'.
+ # The default OS user for the EC2 instance AMI. For AWS Ubuntu AMIs, which is
+ # what the Packer template in ec2-instance.json uses, the default OS user is
+ # 'ubuntu'.
default_user = "ubuntu"
# DNS Time To Live in seconds.
@@ -572,59 +581,59 @@ inputs = {
# If true, the launched EC2 Instance will be EBS-optimized.
ebs_optimized = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Set to true to send logs to CloudWatch. This is useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch.
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch.
enable_cloudwatch_log_aggregation = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/met
- # ics/cloudwatch-memory-disk-metrics-scripts to get memory and disk metrics in
- # CloudWatch for your EC2 instance.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/metrics/cloudwatch-memory-disk-metrics-scripts
+ # to get memory and disk metrics in CloudWatch for your EC2 instance.
enable_cloudwatch_metrics = true
# Enable fail2ban to block brute force log in attempts. Defaults to true.
enable_fail2ban = true
- # Enable ip-lockdown to block access to the instance metadata. Defaults to true.
+ # Enable ip-lockdown to block access to the instance metadata. Defaults to
+ # true.
enable_ip_lockdown = true
# Set to true to add IAM permissions for ssh-grunt
- # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-
- # runt), which will allow you to manage SSH access via IAM groups.
+ # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-grunt),
+ # which will allow you to manage SSH access via IAM groups.
enable_ssh_grunt = true
- # If you are using ssh-grunt and your IAM users / groups are defined in a separate
- # AWS account, you can use this variable to specify the ARN of an IAM role that
- # ssh-grunt can assume to retrieve IAM group and public SSH key info from that
- # account. To omit this variable, set it to an empty string (do NOT use null, or
- # Terraform will complain).
+ # If you are using ssh-grunt and your IAM users / groups are defined in a
+ # separate AWS account, you can use this variable to specify the ARN of an IAM
+ # role that ssh-grunt can assume to retrieve IAM group and public SSH key info
+ # from that account. To omit this variable, set it to an empty string (do NOT
+ # use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The apex domain of the hostname for the EC2 instance (e.g., example.com). The
- # complete hostname for the EC2 instance will be
- # var.name.var.fully_qualified_domain_name (e.g., bastion.example.com). Only used
- # if create_dns_record is true.
+ # The apex domain of the hostname for the EC2 instance (e.g., example.com).
+ # The complete hostname for the EC2 instance will be
+ # var.name.var.fully_qualified_domain_name (e.g., bastion.example.com). Only
+ # used if create_dns_record is true.
fully_qualified_domain_name = ""
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the instance.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the instance.
high_instance_cpu_utilization_period = 60
- # Trigger an alarm if the EC2 instance has a CPU utilization percentage above this
- # threshold.
+ # Trigger an alarm if the EC2 instance has a CPU utilization percentage above
+ # this threshold.
high_instance_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_instance_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -635,54 +644,56 @@ inputs = {
# above this threshold.
high_instance_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_instance_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the instance.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the instance.
high_instance_memory_utilization_period = 60
- # Trigger an alarm if the EC2 instance has a Memory utilization percentage above
- # this threshold.
+ # Trigger an alarm if the EC2 instance has a Memory utilization percentage
+ # above this threshold.
high_instance_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_instance_memory_utilization_treat_missing_data = "missing"
# The name for the bastion host's IAM role and instance profile. If set to an
# empty string, will use var.name. Required when create_iam_role is false.
iam_role_name = ""
- # The name of a Key Pair that can be used to SSH to this instance. This instance
- # may have ssh-grunt installed. The preferred way to do SSH access is with your
- # own IAM user name and SSH key. This Key Pair is only as a fallback.
+ # The name of a Key Pair that can be used to SSH to this instance. This
+ # instance may have ssh-grunt installed. The preferred way to do SSH access is
+ # with your own IAM user name and SSH key. This Key Pair is only used as a
+ # fallback.
keypair_name = null
# Whether the metadata service is available. Valid values include enabled or
# disabled. Defaults to enabled.
metadata_http_endpoint = "enabled"
- # Desired HTTP PUT response hop limit for instance metadata requests. The larger
- # the number, the further instance metadata requests can travel. Valid values are
- # integer from 1 to 64. Defaults to 1.
+ # Desired HTTP PUT response hop limit for instance metadata requests. The
+ # larger the number, the further instance metadata requests can travel. Valid
+ # values are integers from 1 to 64. Defaults to 1.
metadata_http_put_response_hop_limit = 1
- # Whether or not the metadata service requires session tokens, also referred to as
- # Instance Metadata Service Version 2 (IMDSv2). Valid values include optional or
- # required. Defaults to optional.
+ # Whether or not the metadata service requires session tokens, also referred
+ # to as Instance Metadata Service Version 2 (IMDSv2). Valid values include
+ # optional or required. Defaults to optional.
metadata_http_tokens = "optional"
- # Enables or disables access to instance tags from the instance metadata service.
- # Valid values include enabled or disabled. Defaults to disabled.
+ # Enables or disables access to instance tags from the instance metadata
+ # service. Valid values include enabled or disabled. Defaults to disabled.
metadata_tags = "disabled"
- # If set to true, the root volume will be deleted when the Instance is terminated.
+ # If set to true, the root volume will be deleted when the Instance is
+ # terminated.
root_volume_delete_on_termination = true
# The size of the root volume, in gigabytes.
@@ -691,38 +702,39 @@ inputs = {
# The root volume type. Must be one of: standard, gp2, io1.
root_volume_type = "standard"
- # A list of secondary private IPv4 addresses to assign to the instance's primary
- # network interface (eth0) in a VPC
+ # A list of secondary private IPv4 addresses to assign to the instance's
+ # primary network interface (eth0) in a VPC
secondary_private_ips = []
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the EC2 instances. This is useful if you wish to customize the CloudWatch Log
- # Group with various settings such as retention periods and KMS encryption. When
- # false, the CloudWatch agent will automatically create a basic log group to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the EC2 instances. This is useful if you wish to customize the
+ # CloudWatch Log Group with various settings such as retention periods and KMS
+ # encryption. When false, the CloudWatch agent will automatically create a
+ # basic log group to use.
should_create_cloudwatch_log_group = true
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this EC2 instance. To omit this variable, set it to an
- # empty string (do NOT use null, or Terraform will complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this EC2 instance. To omit this variable,
+ # set it to an empty string (do NOT use null, or Terraform will complain).
ssh_grunt_iam_group = ""
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this EC2 instance. To omit this variable, set it to an
- # empty string (do NOT use null, or Terraform will complain).
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this EC2 instance. To omit this variable,
+ # set it to an empty string (do NOT use null, or Terraform will complain).
ssh_grunt_iam_group_sudo = ""
- # A map of tags to apply to the EC2 instance and the S3 Buckets. The key is the
- # tag name and the value is the tag value.
+ # A map of tags to apply to the EC2 instance and the S3 Buckets. The key is
+ # the tag name and the value is the tag value.
tags = {}
# The tenancy of this instance. Must be one of: default, dedicated, or host.
tenancy = "default"
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
}
@@ -1444,11 +1456,11 @@ The input parameters for the EBS volumes.
diff --git a/docs/reference/services/app-orchestration/helm-service.md b/docs/reference/services/app-orchestration/helm-service.md
index 256e0ba37a..e78580fef6 100644
--- a/docs/reference/services/app-orchestration/helm-service.md
+++ b/docs/reference/services/app-orchestration/helm-service.md
@@ -15,11 +15,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Helm Service
-View Source
+View Source
Release Notes
@@ -63,9 +63,9 @@ If you’ve never used the Service Catalog before, make sure to read
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -73,7 +73,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -81,7 +81,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -100,7 +100,7 @@ If you want to deploy this repo in production, check out the following resources
module "helm_service" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/helm-service?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/helm-service?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -110,11 +110,11 @@ module "helm_service" {
# Kubernetes resources.
application_name =
- # Chart name to be installed. The chart name can be local path, a URL to a chart,
- # or the name of the chart if repository is specified. It is also possible to use
- # the / format here if you are running Terraform on a system
- # that the repository has been added to with helm repo add but this is not
- # recommended.
+ # Chart name to be installed. The chart name can be local path, a URL to a
+ # chart, or the name of the chart if repository is specified. It is also
+ # possible to use the `<repository>/<chartname>` format here if you are
+ # running Terraform on a system that the repository has been added to with
+ # helm repo add but this is not recommended.
helm_chart =
# Repository URL where to locate the requested chart.
@@ -127,45 +127,45 @@ module "helm_service" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Configuration for using the IAM role with Service Accounts feature to provide
- # permissions to the applications. This expects a map with two properties:
- # `openid_connect_provider_arn` and `openid_connect_provider_url`. The
- # `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider for EKS
- # to retrieve IAM credentials, while `openid_connect_provider_url` is the URL.
- # Leave as an empty string if you do not wish to use IAM role with Service
- # Accounts.
+ # Configuration for using the IAM role with Service Accounts feature to
+ # provide permissions to the applications. This expects a map with two
+ # properties: `openid_connect_provider_arn` and `openid_connect_provider_url`.
+ # The `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider
+ # for EKS to retrieve IAM credentials, while `openid_connect_provider_url` is
+ # the URL. Leave as an empty string if you do not wish to use IAM role with
+ # Service Accounts.
eks_iam_role_for_service_accounts_config = null
# Map of values to pass to the Helm chart. Leave empty to use chart default
# values.
helm_chart_values = {}
- # Specify the exact chart version to install. If this is not specified, the latest
- # version is installed.
+ # Specify the exact chart version to install. If this is not specified, the
+ # latest version is installed.
helm_chart_version = null
- # An object defining the policy to attach to `iam_role_name` if the IAM role is
- # going to be created. Accepts a map of objects, where the map keys are sids for
- # IAM policy statements, and the object fields are the resources, actions, and the
- # effect ("Allow" or "Deny") of the statement. Ignored if `iam_role_arn` is
- # provided. Leave as null if you do not wish to use IAM role with Service
- # Accounts.
+ # An object defining the policy to attach to `iam_role_name` if the IAM role
+ # is going to be created. Accepts a map of objects, where the map keys are
+ # sids for IAM policy statements, and the object fields are the resources,
+ # actions, and the effect ("Allow" or "Deny") of the statement. Ignored if
+ # `iam_role_arn` is provided. Leave as null if you do not wish to use IAM role
+ # with Service Accounts.
iam_policy = null
# Whether or not the IAM role passed in `iam_role_name` already exists. Set to
# true if it exists, or false if it needs to be created. Defaults to false.
iam_role_exists = false
- # The name of an IAM role that will be used by the pod to access the AWS API. If
- # `iam_role_exists` is set to false, this role will be created. Leave as an empty
- # string if you do not wish to use IAM role with Service Accounts.
+ # The name of an IAM role that will be used by the pod to access the AWS API.
+ # If `iam_role_exists` is set to false, this role will be created. Leave as an
+ # empty string if you do not wish to use IAM role with Service Accounts.
iam_role_name = ""
# The name of a service account to create for use with the Pods. This service
- # account will be mapped to the IAM role defined in `var.iam_role_name` to give
- # the pod permissions to access the AWS API. Must be unique in this namespace.
- # Leave as an empty string if you do not wish to assign a Service Account to the
- # Pods.
+ # account will be mapped to the IAM role defined in `var.iam_role_name` to
+ # give the pod permissions to access the AWS API. Must be unique in this
+ # namespace. Leave as an empty string if you do not wish to assign a Service
+ # Account to the Pods.
service_account_name = ""
# Sleep for 30 seconds to allow Kubernetes time to remove associated AWS
@@ -195,7 +195,7 @@ module "helm_service" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/helm-service?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/helm-service?ref=v0.104.12"
}
inputs = {
@@ -208,11 +208,11 @@ inputs = {
# Kubernetes resources.
application_name =
- # Chart name to be installed. The chart name can be local path, a URL to a chart,
- # or the name of the chart if repository is specified. It is also possible to use
- # the / format here if you are running Terraform on a system
- # that the repository has been added to with helm repo add but this is not
- # recommended.
+ # Chart name to be installed. The chart name can be local path, a URL to a
+ # chart, or the name of the chart if repository is specified. It is also
+ # possible to use the `<repository>/<chartname>` format here if you are
+ # running Terraform on a system that the repository has been added to with
+ # helm repo add but this is not recommended.
helm_chart =
# Repository URL where to locate the requested chart.
@@ -225,45 +225,45 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Configuration for using the IAM role with Service Accounts feature to provide
- # permissions to the applications. This expects a map with two properties:
- # `openid_connect_provider_arn` and `openid_connect_provider_url`. The
- # `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider for EKS
- # to retrieve IAM credentials, while `openid_connect_provider_url` is the URL.
- # Leave as an empty string if you do not wish to use IAM role with Service
- # Accounts.
+ # Configuration for using the IAM role with Service Accounts feature to
+ # provide permissions to the applications. This expects a map with two
+ # properties: `openid_connect_provider_arn` and `openid_connect_provider_url`.
+ # The `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider
+ # for EKS to retrieve IAM credentials, while `openid_connect_provider_url` is
+ # the URL. Leave as an empty string if you do not wish to use IAM role with
+ # Service Accounts.
eks_iam_role_for_service_accounts_config = null
# Map of values to pass to the Helm chart. Leave empty to use chart default
# values.
helm_chart_values = {}
- # Specify the exact chart version to install. If this is not specified, the latest
- # version is installed.
+ # Specify the exact chart version to install. If this is not specified, the
+ # latest version is installed.
helm_chart_version = null
- # An object defining the policy to attach to `iam_role_name` if the IAM role is
- # going to be created. Accepts a map of objects, where the map keys are sids for
- # IAM policy statements, and the object fields are the resources, actions, and the
- # effect ("Allow" or "Deny") of the statement. Ignored if `iam_role_arn` is
- # provided. Leave as null if you do not wish to use IAM role with Service
- # Accounts.
+ # An object defining the policy to attach to `iam_role_name` if the IAM role
+ # is going to be created. Accepts a map of objects, where the map keys are
+ # sids for IAM policy statements, and the object fields are the resources,
+ # actions, and the effect ("Allow" or "Deny") of the statement. Ignored if
+ # `iam_role_arn` is provided. Leave as null if you do not wish to use IAM role
+ # with Service Accounts.
iam_policy = null
# Whether or not the IAM role passed in `iam_role_name` already exists. Set to
# true if it exists, or false if it needs to be created. Defaults to false.
iam_role_exists = false
- # The name of an IAM role that will be used by the pod to access the AWS API. If
- # `iam_role_exists` is set to false, this role will be created. Leave as an empty
- # string if you do not wish to use IAM role with Service Accounts.
+ # The name of an IAM role that will be used by the pod to access the AWS API.
+ # If `iam_role_exists` is set to false, this role will be created. Leave as an
+ # empty string if you do not wish to use IAM role with Service Accounts.
iam_role_name = ""
# The name of a service account to create for use with the Pods. This service
- # account will be mapped to the IAM role defined in `var.iam_role_name` to give
- # the pod permissions to access the AWS API. Must be unique in this namespace.
- # Leave as an empty string if you do not wish to assign a Service Account to the
- # Pods.
+ # account will be mapped to the IAM role defined in `var.iam_role_name` to
+ # give the pod permissions to access the AWS API. Must be unique in this
+ # namespace. Leave as an empty string if you do not wish to assign a Service
+ # Account to the Pods.
service_account_name = ""
# Sleep for 30 seconds to allow Kubernetes time to remove associated AWS
@@ -483,11 +483,11 @@ Number of seconds to wait for Pods to become healthy before marking the deployme
diff --git a/docs/reference/services/app-orchestration/karpenter.md b/docs/reference/services/app-orchestration/karpenter.md
new file mode 100644
index 0000000000..3545f2c30d
--- /dev/null
+++ b/docs/reference/services/app-orchestration/karpenter.md
@@ -0,0 +1,624 @@
+---
+type: "service"
+name: "Karpenter"
+description: "Deploy Karpenter to an Amazon Elastic Kubernetes Service (EKS) cluster."
+category: "docker-orchestration"
+cloud: "aws"
+tags: ["docker","orchestration","kubernetes","containers"]
+license: "gruntwork"
+built-with: "terraform, helm"
+title: "EKS Karpenter"
+hide_title: true
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import VersionBadge from '../../../../src/components/VersionBadge.tsx';
+import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
+
+
+
+# EKS Karpenter
+
+View Source
+
+Release Notes
+
+## Overview
+
+This service contains [Terraform](https://www.terraform.io) code to deploy [Karpenter](https://karpenter.sh/) to
+[Elastic Kubernetes Service (EKS)](https://docs.aws.amazon.com/eks/latest/userguide/clusters.html).
+
+> From the Karpenter Project:
+>
+> Karpenter automatically launches just the right compute resources to handle your cluster's applications. It is designed to let you take full advantage of the cloud with fast and simple compute provisioning for Kubernetes clusters.
+
+## Features
+
+* Creates the required resources to deploy Karpenter to EKS
+
+## Learn
+
+:::note
+
+This repo is a part of the [Gruntwork Service Catalog](https://github.com/gruntwork-io/terraform-aws-service-catalog/),
+a collection of reusable, battle-tested, production ready infrastructure code.
+If you’ve never used the Service Catalog before, make sure to read
+[How to use the Gruntwork Service Catalog](https://docs.gruntwork.io/reference/services/intro/overview)!
+
+:::
+
+Under the hood, this is all implemented using Terraform modules from the Gruntwork
+[terraform-aws-eks](https://github.com/gruntwork-io/terraform-aws-eks) repo. If you are a subscriber and don’t have
+access to this repo, email support@gruntwork.io.
+
+### Core concepts
+
+For detailed information on how Karpenter is deployed to EKS, see the documentation in the
+[terraform-aws-eks](https://github.com/gruntwork-io/terraform-aws-eks) repo.
+
+* [Karpenter](https://github.com/gruntwork-io/terraform-aws-eks/tree/master/modules/eks-k8s-karpenter)
+
+### Repo organization
+
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
+
+## Deploy
+
+### Non-production deployment (quick start for learning)
+
+If you just want to try this repo out for experimenting and learning, check out the following resources:
+
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
+ `examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
+ testing (but not direct production usage).
+
+### Production deployment
+
+If you want to deploy this repo in production, check out the following resources:
+
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
+ optimized for direct usage in production. This is code from the
+ [Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
+ end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
+
+* [How to deploy a production-grade Kubernetes cluster on AWS](https://docs.gruntwork.io/guides/build-it-yourself/kubernetes-cluster/deployment-walkthrough/pre-requisites):
+ A step-by-step guide for deploying a production-grade EKS cluster on AWS using the code in this repo.
+
+
+## Sample Usage
+
+
+
+
+```hcl title="main.tf"
+
+# ------------------------------------------------------------------------------------------------------
+# DEPLOY GRUNTWORK'S EKS-KARPENTER MODULE
+# ------------------------------------------------------------------------------------------------------
+
+module "eks_karpenter" {
+
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-karpenter?ref=v0.104.12"
+
+ # ----------------------------------------------------------------------------------------------------
+ # REQUIRED VARIABLES
+ # ----------------------------------------------------------------------------------------------------
+
+ # The AWS region in which all resources will be created
+ aws_region =
+
+ # URL endpoint of the Kubernetes control plane provided by EKS.
+ eks_cluster_endpoint =
+
+ # The name of the EKS cluster where the core services will be deployed into.
+ eks_cluster_name =
+
+ # The ARN of the EKS OIDC provider. This is required if creating IRSA for the
+ # Karpenter Controller.
+ eks_openid_connect_provider_arn =
+
+ # The URL of the EKS OIDC provider. This is required if creating IRSA for the
+ # Karpenter Controller.
+ eks_openid_connect_provider_url =
+
+ # ----------------------------------------------------------------------------------------------------
+ # OPTIONAL VARIABLES
+ # ----------------------------------------------------------------------------------------------------
+
+ # Optionally create an IAM Role for Service Account (IRSA) for the Karpenter
+ # Controller.
+ create_karpenter_controller_irsa = true
+
+ # Conditional flag to create the Karpenter Node IAM Role. If this is set to
+ # false, then an existing IAM Role must be provided with the
+ # `karpenter_node_existing_iam_role_arn` variable.
+ create_karpenter_node_iam_role = true
+
+ # Conditional flag to optionally create resources in this module.
+ create_resources = true
+
+ # The Helm chart name for the Karpenter chart.
+ karpenter_chart_name = "karpenter"
+
+ # The k8s namespace that the Karpenter Helm chart will be deployed to.
+ karpenter_chart_namespace = "karpenter"
+
+ # The Helm release name for the Karpenter chart.
+ karpenter_chart_release_name = "karpenter"
+
+ # The Helm repository to obtain the Karpenter chart from.
+ karpenter_chart_repository = "oci://public.ecr.aws/karpenter"
+
+ # The version of the Karpenter Helm chart.
+ karpenter_chart_version = "v0.24.0"
+
+ # Provide an existing IAM Role ARN to be used with the Karpenter Controller
+ # Service Account. This is required if `create_karpenter_controller_irsa` is
+ # set to false.
+ karpenter_controller_existing_role_arn = null
+
+ # A tag that is used by Karpenter to discover resources.
+ karpenter_discovery_tag = "karpenter.sh/discovery"
+
+ # ARN of the policy that is used to set the permissions boundary for the role.
+ karpenter_irsa_permissions_boundary = null
+
+ # Use an existing IAM Role to be used for the Karpenter Node Instance Profile.
+ # This is required if `create_karpenter_node_iam_role` is set to false. This
+ # should be the ARN of the IAM Role.
+ karpenter_node_existing_iam_role_arn = null
+
+ # Use an existing IAM Role to be used for the Karpenter Node Instance Profile.
+ # This is required if `create_karpenter_node_iam_role` is set to false. This
+ # should be the Name of the IAM Role.
+ karpenter_node_existing_iam_role_name = null
+
+ # A description of the Karpenter Node IAM Role.
+ karpenter_node_iam_role_description = "IAM Role attached to nodes launched by Karpenter."
+
+ # Maximum session duration (in seconds) that you want to set for the Karpenter
+ # Node role. Value can be between 3600 and 43200.
+ karpenter_node_iam_role_max_session_duration = 3600
+
+ # Optionally provide a name for the Karpenter Node IAM Role. If unset, a name
+ # will be generated.
+ karpenter_node_iam_role_name = null
+
+ # Optionally provide a path to the Karpenter Node IAM Role.
+ karpenter_node_iam_role_path = null
+
+ # ARN of the policy that is used to set the permissions boundary for the role.
+ karpenter_node_iam_role_permissions_boundary = null
+
+ # Additional tags to add to the Karpenter Node IAM Role.
+ karpenter_node_iam_role_tags = {}
+
+ # Optionally use an IAM name prefix for the Karpenter IAM Role.
+ karpenter_node_iam_role_use_name_prefix = false
+
+ # Optionally provide a name for the Karpenter service account that will be
+ # associated with IRSA.
+ karpenter_service_account_name = "karpenter"
+
+}
+
+
+```
+
+
+
+
+```hcl title="terragrunt.hcl"
+
+# ------------------------------------------------------------------------------------------------------
+# DEPLOY GRUNTWORK'S EKS-KARPENTER MODULE
+# ------------------------------------------------------------------------------------------------------
+
+terraform {
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/eks-karpenter?ref=v0.104.12"
+}
+
+inputs = {
+
+ # ----------------------------------------------------------------------------------------------------
+ # REQUIRED VARIABLES
+ # ----------------------------------------------------------------------------------------------------
+
+ # The AWS region in which all resources will be created
+ aws_region =
+
+ # URL endpoint of the Kubernetes control plane provided by EKS.
+ eks_cluster_endpoint =
+
+ # The name of the EKS cluster where the core services will be deployed into.
+ eks_cluster_name =
+
+ # The ARN of the EKS OIDC provider. This is required if creating IRSA for the
+ # Karpenter Controller.
+ eks_openid_connect_provider_arn =
+
+ # The URL of the EKS OIDC provider. This is required if creating IRSA for the
+ # Karpenter Controller.
+ eks_openid_connect_provider_url =
+
+ # ----------------------------------------------------------------------------------------------------
+ # OPTIONAL VARIABLES
+ # ----------------------------------------------------------------------------------------------------
+
+ # Optionally create an IAM Role for Service Account (IRSA) for the Karpenter
+ # Controller.
+ create_karpenter_controller_irsa = true
+
+ # Conditional flag to create the Karpenter Node IAM Role. If this is set to
+ # false, then an existing IAM Role must be provided with the
+ # `karpenter_node_existing_iam_role_arn` variable.
+ create_karpenter_node_iam_role = true
+
+ # Conditional flag to optionally create resources in this module.
+ create_resources = true
+
+ # The Helm chart name for the Karpenter chart.
+ karpenter_chart_name = "karpenter"
+
+ # The k8s namespace that the Karpenter Helm chart will be deployed to.
+ karpenter_chart_namespace = "karpenter"
+
+ # The Helm release name for the Karpenter chart.
+ karpenter_chart_release_name = "karpenter"
+
+ # The Helm repository to obtain the Karpenter chart from.
+ karpenter_chart_repository = "oci://public.ecr.aws/karpenter"
+
+ # The version of the Karpenter Helm chart.
+ karpenter_chart_version = "v0.24.0"
+
+ # Provide an existing IAM Role ARN to be used with the Karpenter Controller
+ # Service Account. This is required if `create_karpenter_controller_irsa` is
+ # set to false.
+ karpenter_controller_existing_role_arn = null
+
+ # A tag that is used by Karpenter to discover resources.
+ karpenter_discovery_tag = "karpenter.sh/discovery"
+
+ # ARN of the policy that is used to set the permissions boundary for the role.
+ karpenter_irsa_permissions_boundary = null
+
+ # Use an existing IAM Role to be used for the Karpenter Node Instance Profile.
+ # This is required if `create_karpenter_node_iam_role` is set to false. This
+ # should be the ARN of the IAM Role.
+ karpenter_node_existing_iam_role_arn = null
+
+ # Use an existing IAM Role to be used for the Karpenter Node Instance Profile.
+ # This is required if `create_karpenter_node_iam_role` is set to false. This
+ # should be the Name of the IAM Role.
+ karpenter_node_existing_iam_role_name = null
+
+ # A description of the Karpenter Node IAM Role.
+ karpenter_node_iam_role_description = "IAM Role attached to nodes launched by Karpenter."
+
+ # Maximum session duration (in seconds) that you want to set for the Karpenter
+ # Node role. Value can be between 3600 and 43200.
+ karpenter_node_iam_role_max_session_duration = 3600
+
+ # Optionally provide a name for the Karpenter Node IAM Role. If unset, a name
+ # will be generated.
+ karpenter_node_iam_role_name = null
+
+ # Optionally provide a path to the Karpenter Node IAM Role.
+ karpenter_node_iam_role_path = null
+
+ # ARN of the policy that is used to set the permissions boundary for the role.
+ karpenter_node_iam_role_permissions_boundary = null
+
+ # Additional tags to add to the Karpenter Node IAM Role.
+ karpenter_node_iam_role_tags = {}
+
+ # Optionally use an IAM name prefix for the Karpenter IAM Role.
+ karpenter_node_iam_role_use_name_prefix = false
+
+ # Optionally provide a name for the Karpenter service account that will be
+ # associated with IRSA.
+ karpenter_service_account_name = "karpenter"
+
+}
+
+
+```
+
+
+
+
+
+
+## Reference
+
+
+
+
+
+### Required
+
+
+
+
+The AWS region in which all resources will be created
+
+
+
+
+
+
+
+URL endpoint of the Kubernetes control plane provided by EKS.
+
+
+
+
+
+
+
+The name of the EKS cluster where the core services will be deployed into.
+
+
+
+
+
+
+
+The ARN of the EKS OIDC provider. This is required if creating IRSA for the Karpenter Controller.
+
+
+
+
+
+
+
+The URL of the EKS OIDC provider. This is required if creating IRSA for the Karpenter Controller.
+
+
+
+
+### Optional
+
+
+
+
+Optionally create an IAM Role for Service Account (IRSA) for the Karpenter Controller.
+
+
+
+
+
+
+
+
+Conditional flag to create the Karpenter Node IAM Role. If this is set to false, then an existing IAM Role must be provided with the `karpenter_node_existing_iam_role_arn` variable.
+
+
+
+
+
+
+
+
+Conditional flag to optionally create resources in this module.
+
+
+
+
+
+
+
+
+The Helm chart name for the Karpenter chart.
+
+
+
+
+
+
+
+
+The k8s namespace that the Karpenter Helm chart will be deployed to.
+
+
+
+
+
+
+
+
+The Helm release name for the Karpenter chart.
+
+
+
+
+
+
+
+
+The Helm repository to obtain the Karpenter chart from.
+
+
+
+
+
+
+
+
+The version of the Karpenter Helm chart.
+
+
+
+
+
+
+
+
+Provide an existing IAM Role ARN to be used with the Karpenter Controller Service Account. This is required if `create_karpenter_controller_irsa` is set to false.
+
+
+
+
+
+
+
+
+A tag that is used by Karpenter to discover resources.
+
+
+
+
+
+
+
+
+ARN of the policy that is used to set the permissions boundary for the role.
+
+
+
+
+
+
+
+
+Use an existing IAM Role to be used for the Karpenter Node Instance Profile. This is required if `create_karpenter_node_iam_role` is set to false. This should be the ARN of the IAM Role.
+
+
+
+
+
+
+
+
+Use an existing IAM Role to be used for the Karpenter Node Instance Profile. This is required if `create_karpenter_node_iam_role` is set to false. This should be the Name of the IAM Role.
+
+
+
+
+
+
+
+
+A description of the Karpenter Node IAM Role.
+
+
+
+
+
+
+
+
+Maximum session duration (in seconds) that you want to set for the Karpenter Node role. Value can be between 3600 and 43200.
+
+
+
+
+
+
+
+
+Optionally provide a name for the Karpenter Node IAM Role. If unset, a name will be generated.
+
+
+
+
+
+
+
+
+Optionally provide a path to the Karpenter Node IAM Role.
+
+
+
+
+
+
+
+
+ARN of the policy that is used to set the permissions boundary for the role.
+
+
+
+
+
+
+
+
+Additional tags to add to the Karpenter Node IAM Role.
+
+
+
+
+
+
+
+
+Optionally use an IAM name prefix for the Karpenter IAM Role.
+
+
+
+
+
+
+
+
+Optionally provide a name for the Karpenter service account that will be associated with IRSA.
+
+
+
+
+
+
+
+
+
+
+
+The ARN of the Karpenter Controller IRSA Role.
+
+
+
+
+
+
+
+The Name of the Karpenter Controller IRSA Role.
+
+
+
+
+
+
+
+The ARN of the Karpenter Node IAM Role.
+
+
+
+
+
+
+
+The name of the Karpenter Node IAM Role.
+
+
+
+
+
+
+
+
+
diff --git a/docs/reference/services/app-orchestration/kubernetes-namespace.md b/docs/reference/services/app-orchestration/kubernetes-namespace.md
index d8350fc501..e8dbeb6a83 100644
--- a/docs/reference/services/app-orchestration/kubernetes-namespace.md
+++ b/docs/reference/services/app-orchestration/kubernetes-namespace.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Kubernetes Namespace
-View Source
+View Source
Release Notes
@@ -65,9 +65,9 @@ subscriber and don’t have access to this repo, email .
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -75,7 +75,7 @@ subscriber and don’t have access to this repo, email .
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -83,7 +83,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -102,7 +102,7 @@ If you want to deploy this repo in production, check out the following resources
module "k_8_s_namespace" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/k8s-namespace?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/k8s-namespace?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -115,9 +115,9 @@ module "k_8_s_namespace" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Map of string key default pairs that can be used to store arbitrary metadata on
- # the namespace and roles. See the Kubernetes Reference for more info
- # (https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ # Map of string key default pairs that can be used to store arbitrary metadata
+ # on the namespace and roles. See the Kubernetes Reference for more info
+ # (https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/).
annotations = {}
# Name of the EKS cluster where the Namespace will be created. Required when
@@ -127,8 +127,8 @@ module "k_8_s_namespace" {
# The list of RBAC entities that should have full access to the Namespace.
full_access_rbac_entities = []
- # Map of string key value pairs that can be used to organize and categorize the
- # namespace and roles. See the Kubernetes Reference for more info
+ # Map of string key value pairs that can be used to organize and categorize
+ # the namespace and roles. See the Kubernetes Reference for more info
# (https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
labels = {}
@@ -136,17 +136,19 @@ module "k_8_s_namespace" {
# var.schedule_pods_on_fargate is true.
pod_execution_iam_role_arn = null
- # The list of RBAC entities that should have read only access to the Namespace.
+ # The list of RBAC entities that should have read only access to the
+ # Namespace.
read_only_access_rbac_entities = []
- # When true, will create a Fargate Profile that matches all Pods in the Namespace.
- # This means that all Pods in the Namespace will be scheduled on Fargate. Note
- # that this value is only used if var.kubeconfig_auth_type is eks, as Fargate
- # profiles can only be created against EKS clusters.
+ # When true, will create a Fargate Profile that matches all Pods in the
+ # Namespace. This means that all Pods in the Namespace will be scheduled on
+ # Fargate. Note that this value is only used if var.kubeconfig_auth_type is
+ # eks, as Fargate profiles can only be created against EKS clusters.
schedule_pods_on_fargate = false
- # The subnet IDs to use for EKS worker nodes. Used when provisioning Pods on to
- # Fargate. At least 1 subnet is required if var.schedule_pods_on_fargate is true.
+ # The subnet IDs to use for EKS worker nodes. Used when provisioning Pods on
+ # to Fargate. At least 1 subnet is required if var.schedule_pods_on_fargate is
+ # true.
worker_vpc_subnet_ids = []
}
@@ -164,7 +166,7 @@ module "k_8_s_namespace" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/k8s-namespace?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/k8s-namespace?ref=v0.104.12"
}
inputs = {
@@ -180,9 +182,9 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Map of string key default pairs that can be used to store arbitrary metadata on
- # the namespace and roles. See the Kubernetes Reference for more info
- # (https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ # Map of string key default pairs that can be used to store arbitrary metadata
+ # on the namespace and roles. See the Kubernetes Reference for more info
+ # (https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/).
annotations = {}
# Name of the EKS cluster where the Namespace will be created. Required when
@@ -192,8 +194,8 @@ inputs = {
# The list of RBAC entities that should have full access to the Namespace.
full_access_rbac_entities = []
- # Map of string key value pairs that can be used to organize and categorize the
- # namespace and roles. See the Kubernetes Reference for more info
+ # Map of string key value pairs that can be used to organize and categorize
+ # the namespace and roles. See the Kubernetes Reference for more info
# (https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
labels = {}
@@ -201,17 +203,19 @@ inputs = {
# var.schedule_pods_on_fargate is true.
pod_execution_iam_role_arn = null
- # The list of RBAC entities that should have read only access to the Namespace.
+ # The list of RBAC entities that should have read only access to the
+ # Namespace.
read_only_access_rbac_entities = []
- # When true, will create a Fargate Profile that matches all Pods in the Namespace.
- # This means that all Pods in the Namespace will be scheduled on Fargate. Note
- # that this value is only used if var.kubeconfig_auth_type is eks, as Fargate
- # profiles can only be created against EKS clusters.
+ # When true, will create a Fargate Profile that matches all Pods in the
+ # Namespace. This means that all Pods in the Namespace will be scheduled on
+ # Fargate. Note that this value is only used if var.kubeconfig_auth_type is
+ # eks, as Fargate profiles can only be created against EKS clusters.
schedule_pods_on_fargate = false
- # The subnet IDs to use for EKS worker nodes. Used when provisioning Pods on to
- # Fargate. At least 1 subnet is required if var.schedule_pods_on_fargate is true.
+ # The subnet IDs to use for EKS worker nodes. Used when provisioning Pods on
+ # to Fargate. At least 1 subnet is required if var.schedule_pods_on_fargate is
+ # true.
worker_vpc_subnet_ids = []
}
@@ -424,11 +428,11 @@ The name of the rbac role that grants read only permissions on the namespace.
diff --git a/docs/reference/services/app-orchestration/kubernetes-service.md b/docs/reference/services/app-orchestration/kubernetes-service.md
index f507892a90..cf2a24cd83 100644
--- a/docs/reference/services/app-orchestration/kubernetes-service.md
+++ b/docs/reference/services/app-orchestration/kubernetes-service.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Kubernetes Service
-View Source
+View Source
Release Notes
@@ -74,9 +74,9 @@ don’t have access to this repo, email .
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -84,7 +84,7 @@ don’t have access to this repo, email .
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -92,7 +92,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -111,7 +111,7 @@ If you want to deploy this repo in production, check out the following resources
module "k_8_s_service" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/k8s-service?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/k8s-service?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -142,43 +142,44 @@ module "k_8_s_service" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Map of additional ports to expose for the container. The key is the name of the
- # port and value contains port number and protocol.
+ # Map of additional ports to expose for the container. The key is the name of
+ # the port and value contains port number and protocol.
additional_ports = null
- # A list of ACM certificate ARNs to attach to the ALB. The first certificate in
- # the list will be added as default certificate.
+ # A list of ACM certificate ARNs to attach to the ALB. The first certificate
+ # in the list will be added as default certificate.
alb_acm_certificate_arns = []
- # The number of consecutive health check successes required before considering an
- # unhealthy target healthy.
+ # The number of consecutive health check successes required before considering
+ # an unhealthy target healthy.
alb_health_check_healthy_threshold = 2
# Interval between ALB health checks in seconds.
alb_health_check_interval = 30
- # URL path for the endpoint that the ALB health check should ping. Defaults to /.
+ # URL path for the endpoint that the ALB health check should ping. Defaults to
+ # /.
alb_health_check_path = "/"
# String value specifying the port that the ALB health check should probe. By
- # default, this will be set to the traffic port (the NodePort or port where the
- # service receives traffic). This can also be set to a Kubernetes named port, or
- # direct integer value. See
- # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.3/guide/ingres
- # /annotations/#healthcheck-port for more information.
+ # default, this will be set to the traffic port (the NodePort or port where
+ # the service receives traffic). This can also be set to a Kubernetes named
+ # port, or direct integer value. See
+ # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.3/guide/ingress/annotations/#healthcheck-port
+ # for more information.
alb_health_check_port = "traffic-port"
- # Protocol (HTTP or HTTPS) that the ALB health check should use to connect to the
- # application container.
+ # Protocol (HTTP or HTTPS) that the ALB health check should use to connect to
+ # the application container.
alb_health_check_protocol = "HTTP"
- # The HTTP status code that should be expected when doing health checks against
- # the specified health check path. Accepts a single value (200), multiple values
- # (200,201), or a range of values (200-300).
+ # The HTTP status code that should be expected when doing health checks
+ # against the specified health check path. Accepts a single value (200),
+ # multiple values (200,201), or a range of values (200-300).
alb_health_check_success_codes = "200"
- # The timeout, in seconds, during which no response from a target means a failed
- # health check.
+ # The timeout, in seconds, during which no response from a target means a
+ # failed health check.
alb_health_check_timeout = 10
# The Docker image to use for the canary. Required if
@@ -188,167 +189,169 @@ module "k_8_s_service" {
# Allow deletion of new resources created in this upgrade when upgrade fails.
cleanup_on_fail = null
- # Kubernetes ConfigMaps to be injected into the container. Each entry in the map
- # represents a ConfigMap to be injected, with the key representing the name of the
- # ConfigMap. The value is also a map, with each entry corresponding to an entry in
- # the ConfigMap, with the key corresponding to the ConfigMap entry key and the
- # value corresponding to the environment variable name.
+ # Kubernetes ConfigMaps to be injected into the container. Each entry in the
+ # map represents a ConfigMap to be injected, with the key representing the
+ # name of the ConfigMap. The value is also a map, with each entry
+ # corresponding to an entry in the ConfigMap, with the key corresponding to
+ # the ConfigMap entry key and the value corresponding to the environment
+ # variable name.
configmaps_as_env_vars = {}
- # Kubernetes ConfigMaps to be injected into the container as volume mounts. Each
- # entry in the map represents a ConfigMap to be mounted, with the key representing
- # the name of the ConfigMap and the value as a map containing required mountPath
- # (file path on the container to mount the ConfigMap to) and optional subPath
- # (sub-path inside the referenced volume).
+ # Kubernetes ConfigMaps to be injected into the container as volume mounts.
+ # Each entry in the map represents a ConfigMap to be mounted, with the key
+ # representing the name of the ConfigMap and the value as a map containing
+ # required mountPath (file path on the container to mount the ConfigMap to)
+ # and optional subPath (sub-path inside the referenced volume).
configmaps_as_volumes = {}
- # The protocol on which this service's Docker container accepts traffic. Must be
- # one of the supported protocols:
- # https://kubernetes.io/docs/concepts/services-networking/service/#protocol-suppor
- # .
+ # The protocol on which this service's Docker container accepts traffic. Must
+ # be one of the supported protocols:
+ # https://kubernetes.io/docs/concepts/services-networking/service/#protocol-support.
container_protocol = "TCP"
# The map that lets you define Kubernetes resources you want installed and
# configured as part of the chart.
custom_resources = {}
- # The number of canary Pods to run on the Kubernetes cluster for this service. If
- # greater than 0, you must provide var.canary_image.
+ # The number of canary Pods to run on the Kubernetes cluster for this service.
+ # If greater than 0, you must provide var.canary_image.
desired_number_of_canary_pods = 0
- # The domain name for the DNS A record to bind to the Ingress resource for this
- # service (e.g. service.foo.com). Depending on your external-dns configuration,
- # this will also create the DNS record in the configured DNS service (e.g.,
- # Route53).
+ # The domain name for the DNS A record to bind to the Ingress resource for
+ # this service (e.g. service.foo.com). Depending on your external-dns
+ # configuration, this will also create the DNS record in the configured DNS
+ # service (e.g., Route53).
domain_name = null
- # The TTL value of the DNS A record that is bound to the Ingress resource. Only
- # used if var.domain_name is set and external-dns is deployed.
+ # The TTL value of the DNS A record that is bound to the Ingress resource.
+ # Only used if var.domain_name is set and external-dns is deployed.
domain_propagation_ttl = null
- # Configuration for using the IAM role with Service Accounts feature to provide
- # permissions to the applications. This expects a map with two properties:
- # `openid_connect_provider_arn` and `openid_connect_provider_url`. The
- # `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider for EKS
- # to retrieve IAM credentials, while `openid_connect_provider_url` is the URL.
- # Leave as an empty string if you do not wish to use IAM role with Service
- # Accounts.
+ # Configuration for using the IAM role with Service Accounts feature to
+ # provide permissions to the applications. This expects a map with two
+ # properties: `openid_connect_provider_arn` and `openid_connect_provider_url`.
+ # The `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider
+ # for EKS to retrieve IAM credentials, while `openid_connect_provider_url` is
+ # the URL. Leave as an empty string if you do not wish to use IAM role with
+ # Service Accounts.
eks_iam_role_for_service_accounts_config = {"openid_connect_provider_arn":"","openid_connect_provider_url":""}
- # Whether or not to enable liveness probe. Liveness checks indicate whether or not
- # the container is alive. When these checks fail, the cluster will automatically
- # rotate the Pod.
+ # Whether or not to enable liveness probe. Liveness checks indicate whether or
+ # not the container is alive. When these checks fail, the cluster will
+ # automatically rotate the Pod.
enable_liveness_probe = false
- # Whether or not to enable readiness probe. Readiness checks indicate whether or
- # not the container can accept traffic. When these checks fail, the Pods are
- # automatically removed from the Service (and added back in when they pass).
+ # Whether or not to enable readiness probe. Readiness checks indicate whether
+ # or not the container can accept traffic. When these checks fail, the Pods
+ # are automatically removed from the Service (and added back in when they
+ # pass).
enable_readiness_probe = false
- # A map of environment variable name to environment variable value that should be
- # made available to the Docker container.
+ # A map of environment variable name to environment variable value that should
+ # be made available to the Docker container.
env_vars = {}
# How the service will be exposed in the cluster. Must be one of `external`
- # (accessible over the public Internet), `internal` (only accessible from within
- # the same VPC as the cluster), `cluster-internal` (only accessible within the
- # Kubernetes network), `none` (deploys as a headless service with no service IP).
+ # (accessible over the public Internet), `internal` (only accessible from
+ # within the same VPC as the cluster), `cluster-internal` (only accessible
+ # within the Kubernetes network), `none` (deploys as a headless service with
+ # no service IP).
expose_type = "cluster-internal"
# A boolean that indicates whether the access logs bucket should be destroyed,
- # even if there are files in it, when you run Terraform destroy. Unless you are
- # using this bucket only for test purposes, you'll want to leave this variable set
- # to false.
+ # even if there are files in it, when you run Terraform destroy. Unless you
+ # are using this bucket only for test purposes, you'll want to leave this
+ # variable set to false.
force_destroy_ingress_access_logs = false
# The version of the k8s-service helm chart to deploy.
helm_chart_version = "v0.2.18"
# Configure the Horizontal Pod Autoscaler (HPA) information for the associated
- # Deployment. HPA is disabled when this variable is set to null. Note that to use
- # an HPA, you must have a corresponding service deployed to your cluster that
- # exports the metrics (e.g., metrics-server
+ # Deployment. HPA is disabled when this variable is set to null. Note that to
+ # use an HPA, you must have a corresponding service deployed to your cluster
+ # that exports the metrics (e.g., metrics-server
# https://github.com/kubernetes-sigs/metrics-server).
horizontal_pod_autoscaler = null
- # An object defining the policy to attach to `iam_role_name` if the IAM role is
- # going to be created. Accepts a map of objects, where the map keys are sids for
- # IAM policy statements, and the object fields are the resources, actions, and the
- # effect ("Allow" or "Deny") of the statement. Ignored if `iam_role_arn` is
- # provided. Leave as null if you do not wish to use IAM role with Service
- # Accounts.
+ # An object defining the policy to attach to `iam_role_name` if the IAM role
+ # is going to be created. Accepts a map of objects, where the map keys are
+ # sids for IAM policy statements, and the object fields are the resources,
+ # actions, and the effect ("Allow" or "Deny") of the statement. Ignored if
+ # `iam_role_arn` is provided. Leave as null if you do not wish to use IAM role
+ # with Service Accounts.
iam_policy = null
# Whether or not the IAM role passed in `iam_role_name` already exists. Set to
# true if it exists, or false if it needs to be created. Defaults to false.
iam_role_exists = false
- # The name of an IAM role that will be used by the pod to access the AWS API. If
- # `iam_role_exists` is set to false, this role will be created. Leave as an empty
- # string if you do not wish to use IAM role with Service Accounts.
+ # The name of an IAM role that will be used by the pod to access the AWS API.
+ # If `iam_role_exists` is set to false, this role will be created. Leave as an
+ # empty string if you do not wish to use IAM role with Service Accounts.
iam_role_name = ""
# Set to true if the S3 bucket to store the Ingress access logs is managed
# external to this module.
ingress_access_logs_s3_bucket_already_exists = false
- # The name to use for the S3 bucket where the Ingress access logs will be stored.
- # If you leave this blank, a name will be generated automatically based on
- # var.application_name.
+ # The name to use for the S3 bucket where the Ingress access logs will be
+ # stored. If you leave this blank, a name will be generated automatically
+ # based on var.application_name.
ingress_access_logs_s3_bucket_name = ""
- # The prefix to use for ingress access logs associated with the ALB. All logs will
- # be stored in a key with this prefix. If null, the application name will be used.
+ # The prefix to use for ingress access logs associated with the ALB. All logs
+ # will be stored in a key with this prefix. If null, the application name will
+ # be used.
ingress_access_logs_s3_prefix = null
# A list of custom ingress annotations, such as health checks and TLS
# certificates, to add to the Helm chart. See:
- # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/guide/ingres
- # /annotations/
+ # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/guide/ingress/annotations/
ingress_annotations = {}
- # The protocol used by the Ingress ALB resource to communicate with the Service.
- # Must be one of HTTP or HTTPS.
+ # The protocol used by the Ingress ALB resource to communicate with the
+ # Service. Must be one of HTTP or HTTPS.
ingress_backend_protocol = "HTTP"
- # When true, HTTP requests will automatically be redirected to use SSL (HTTPS).
- # Used only when expose_type is either external or internal.
+ # When true, HTTP requests will automatically be redirected to use SSL
+ # (HTTPS). Used only when expose_type is either external or internal.
ingress_configure_ssl_redirect = true
- # Assign the ingress resource to an IngressGroup. All Ingress rules of the group
- # will be collapsed to a single ALB. The rules will be collapsed in priority
- # order, with lower numbers being evaluated first.
+ # Assign the ingress resource to an IngressGroup. All Ingress rules of the
+ # group will be collapsed to a single ALB. The rules will be collapsed in
+ # priority order, with lower numbers being evaluated first.
ingress_group = null
# A list of maps of protocols and ports that the ALB should listen on.
ingress_listener_protocol_ports = [{"port":80,"protocol":"HTTP"},{"port":443,"protocol":"HTTPS"}]
# Path prefix that should be matched to route to the service. For Kubernetes
- # Versions <1.19, Use /* to match all paths. For Kubernetes Versions >=1.19, use /
- # with ingress_path_type set to Prefix to match all paths.
+ # Versions <1.19, Use /* to match all paths. For Kubernetes Versions >=1.19,
+ # use / with ingress_path_type set to Prefix to match all paths.
ingress_path = "/"
# The path type to use for the ingress rule. Refer to
- # https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types for
- # more information.
+ # https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types
+ # for more information.
ingress_path_type = "Prefix"
# Set to true if the Ingress SSL redirect rule is managed externally. This is
# useful when configuring Ingress grouping and you only want one service to be
- # managing the SSL redirect rules. Only used if ingress_configure_ssl_redirect is
- # true.
+ # managing the SSL redirect rules. Only used if ingress_configure_ssl_redirect
+ # is true.
ingress_ssl_redirect_rule_already_exists = false
- # Whether or not the redirect rule requires setting path type. Set to true when
- # deploying to Kubernetes clusters with version >=1.19. Only used if
+ # Whether or not the redirect rule requires setting path type. Set to true
+ # when deploying to Kubernetes clusters with version >=1.19. Only used if
# ingress_configure_ssl_redirect is true.
ingress_ssl_redirect_rule_requires_path_type = true
- # Controls how the ALB routes traffic to the Pods. Supports 'instance' mode (route
- # traffic to NodePort and load balance across all worker nodes, relying on
- # Kubernetes Service networking to route to the pods), or 'ip' mode (route traffic
- # directly to the pod IP - only works with AWS VPC CNI). Must be set to 'ip' if
- # using Fargate. Only used if expose_type is not cluster-internal.
+ # Controls how the ALB routes traffic to the Pods. Supports 'instance' mode
+ # (route traffic to NodePort and load balance across all worker nodes, relying
+ # on Kubernetes Service networking to route to the pods), or 'ip' mode (route
+ # traffic directly to the pod IP - only works with AWS VPC CNI). Must be set
+ # to 'ip' if using Fargate. Only used if expose_type is not cluster-internal.
ingress_target_type = "instance"
# Seconds to wait after Pod creation before liveness probe has any effect. Any
@@ -362,34 +365,35 @@ module "k_8_s_service" {
# URL path for the endpoint that the liveness probe should ping.
liveness_probe_path = "/"
- # Port that the liveness probe should use to connect to the application container.
+ # Port that the liveness probe should use to connect to the application
+ # container.
liveness_probe_port = 80
- # Protocol (HTTP or HTTPS) that the liveness probe should use to connect to the
- # application container.
+ # Protocol (HTTP or HTTPS) that the liveness probe should use to connect to
+ # the application container.
liveness_probe_protocol = "HTTP"
- # The minimum number of pods that should be available at any given point in time.
- # This is used to configure a PodDisruptionBudget for the service, allowing you to
- # achieve a graceful rollout. See
- # https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-podd
- # sruptionbudgets-ef6a4baa5085 for an introduction to PodDisruptionBudgets.
+ # The minimum number of pods that should be available at any given point in
+ # time. This is used to configure a PodDisruptionBudget for the service,
+ # allowing you to achieve a graceful rollout. See
+ # https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-poddisruptionbudgets-ef6a4baa5085
+ # for an introduction to PodDisruptionBudgets.
min_number_of_pods_available = 0
- # After this number of days, Ingress log files should be transitioned from S3 to
- # Glacier. Set to 0 to never archive logs.
+ # After this number of days, Ingress log files should be transitioned from S3
+ # to Glacier. Set to 0 to never archive logs.
num_days_after_which_archive_ingress_log_data = 0
- # After this number of days, Ingress log files should be deleted from S3. Set to 0
- # to never delete logs.
+ # After this number of days, Ingress log files should be deleted from S3. Set
+ # to 0 to never delete logs.
num_days_after_which_delete_ingress_log_data = 0
- # Override any computed chart inputs with this map. This map is shallow merged to
- # the computed chart inputs prior to passing on to the Helm Release. This is
- # provided as a workaround while the terraform module does not support a
- # particular input value that is exposed in the underlying chart. Please always
- # file a GitHub issue to request exposing additional underlying input values prior
- # to using this variable.
+ # Override any computed chart inputs with this map. This map is shallow merged
+ # to the computed chart inputs prior to passing on to the Helm Release. This
+ # is provided as a workaround while the terraform module does not support a
+ # particular input value that is exposed in the underlying chart. Please
+ # always file a GitHub issue to request exposing additional underlying input
+ # values prior to using this variable.
override_chart_inputs = {}
# Seconds to wait after Pod creation before liveness probe has any effect. Any
@@ -407,39 +411,39 @@ module "k_8_s_service" {
# container.
readiness_probe_port = 80
- # Protocol (HTTP or HTTPS) that the readiness probe should use to connect to the
- # application container.
+ # Protocol (HTTP or HTTPS) that the readiness probe should use to connect to
+ # the application container.
readiness_probe_protocol = "HTTP"
# Paths that should be allocated as tmpfs volumes in the Deployment container.
- # Each entry in the map is a key value pair where the key is an arbitrary name to
- # bind to the volume, and the value is the path in the container to mount the
- # tmpfs volume.
+ # Each entry in the map is a key value pair where the key is an arbitrary name
+ # to bind to the volume, and the value is the path in the container to mount
+ # the tmpfs volume.
scratch_paths = {}
# Kubernetes Secrets to be injected into the container. Each entry in the map
- # represents a Secret to be injected, with the key representing the name of the
- # Secret. The value is also a map, with each entry corresponding to an entry in
- # the Secret, with the key corresponding to the Secret entry key and the value
- # corresponding to the environment variable name.
+ # represents a Secret to be injected, with the key representing the name of
+ # the Secret. The value is also a map, with each entry corresponding to an
+ # entry in the Secret, with the key corresponding to the Secret entry key and
+ # the value corresponding to the environment variable name.
secrets_as_env_vars = {}
# Kubernetes Secrets to be injected into the container as volume mounts. Each
- # entry in the map represents a Secret to be mounted, with the key representing
- # the name of the Secret and the value as a map containing required mountPath
- # (file path on the container to mount the Secret to) and optional subPath
- # (sub-path inside the referenced volume).
+ # entry in the map represents a Secret to be mounted, with the key
+ # representing the name of the Secret and the value as a map containing
+ # required mountPath (file path on the container to mount the Secret to) and
+ # optional subPath (sub-path inside the referenced volume).
secrets_as_volumes = {}
- # When true, and service_account_name is not blank, lookup and assign an existing
- # ServiceAccount in the Namespace to the Pods.
+ # When true, and service_account_name is not blank, lookup and assign an
+ # existing ServiceAccount in the Namespace to the Pods.
service_account_exists = false
# The name of a service account to create for use with the Pods. This service
- # account will be mapped to the IAM role defined in `var.iam_role_name` to give
- # the pod permissions to access the AWS API. Must be unique in this namespace.
- # Leave as an empty string if you do not wish to assign a Service Account to the
- # Pods.
+ # account will be mapped to the IAM role defined in `var.iam_role_name` to
+ # give the pod permissions to access the AWS API. Must be unique in this
+ # namespace. Leave as an empty string if you do not wish to assign a Service
+ # Account to the Pods.
service_account_name = ""
# The port to expose on the Service. This is most useful when addressing the
@@ -447,26 +451,27 @@ module "k_8_s_service" {
# Ingress resource.
service_port = 80
- # Map of keys to container definitions that allow you to manage additional side
- # car containers that should be included in the Pod. Note that the values are
- # injected directly into the container list for the Pod Spec.
+ # Map of keys to container definitions that allow you to manage additional
+ # side car containers that should be included in the Pod. Note that the values
+ # are injected directly into the container list for the Pod Spec.
sidecar_containers = {}
- # Grace period in seconds that Kubernetes will wait before terminating the pod.
- # The timeout happens in parallel to preStop hook and the SIGTERM signal,
+ # Grace period in seconds that Kubernetes will wait before terminating the
+ # pod. The timeout happens in parallel to preStop hook and the SIGTERM signal,
# Kubernetes does not wait for preStop to finish before beginning the grace
# period.
termination_grace_period_seconds = null
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# A local file path where the helm chart values will be emitted. Use to debug
- # issues with the helm chart values. Set to null to prevent creation of the file.
+ # issues with the helm chart values. Set to null to prevent creation of the
+ # file.
values_file_path = null
# When true, wait until Pods are up and healthy or wait_timeout seconds before
@@ -492,7 +497,7 @@ module "k_8_s_service" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/k8s-service?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/k8s-service?ref=v0.104.12"
}
inputs = {
@@ -526,43 +531,44 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Map of additional ports to expose for the container. The key is the name of the
- # port and value contains port number and protocol.
+ # Map of additional ports to expose for the container. The key is the name of
+ # the port and value contains port number and protocol.
additional_ports = null
- # A list of ACM certificate ARNs to attach to the ALB. The first certificate in
- # the list will be added as default certificate.
+ # A list of ACM certificate ARNs to attach to the ALB. The first certificate
+ # in the list will be added as default certificate.
alb_acm_certificate_arns = []
- # The number of consecutive health check successes required before considering an
- # unhealthy target healthy.
+ # The number of consecutive health check successes required before considering
+ # an unhealthy target healthy.
alb_health_check_healthy_threshold = 2
# Interval between ALB health checks in seconds.
alb_health_check_interval = 30
- # URL path for the endpoint that the ALB health check should ping. Defaults to /.
+ # URL path for the endpoint that the ALB health check should ping. Defaults to
+ # /.
alb_health_check_path = "/"
# String value specifying the port that the ALB health check should probe. By
- # default, this will be set to the traffic port (the NodePort or port where the
- # service receives traffic). This can also be set to a Kubernetes named port, or
- # direct integer value. See
- # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.3/guide/ingres
- # /annotations/#healthcheck-port for more information.
+ # default, this will be set to the traffic port (the NodePort or port where
+ # the service receives traffic). This can also be set to a Kubernetes named
+ # port, or direct integer value. See
+ # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.3/guide/ingress/annotations/#healthcheck-port
+ # for more information.
alb_health_check_port = "traffic-port"
- # Protocol (HTTP or HTTPS) that the ALB health check should use to connect to the
- # application container.
+ # Protocol (HTTP or HTTPS) that the ALB health check should use to connect to
+ # the application container.
alb_health_check_protocol = "HTTP"
- # The HTTP status code that should be expected when doing health checks against
- # the specified health check path. Accepts a single value (200), multiple values
- # (200,201), or a range of values (200-300).
+ # The HTTP status code that should be expected when doing health checks
+ # against the specified health check path. Accepts a single value (200),
+ # multiple values (200,201), or a range of values (200-300).
alb_health_check_success_codes = "200"
- # The timeout, in seconds, during which no response from a target means a failed
- # health check.
+ # The timeout, in seconds, during which no response from a target means a
+ # failed health check.
alb_health_check_timeout = 10
# The Docker image to use for the canary. Required if
@@ -572,167 +578,169 @@ inputs = {
# Allow deletion of new resources created in this upgrade when upgrade fails.
cleanup_on_fail = null
- # Kubernetes ConfigMaps to be injected into the container. Each entry in the map
- # represents a ConfigMap to be injected, with the key representing the name of the
- # ConfigMap. The value is also a map, with each entry corresponding to an entry in
- # the ConfigMap, with the key corresponding to the ConfigMap entry key and the
- # value corresponding to the environment variable name.
+ # Kubernetes ConfigMaps to be injected into the container. Each entry in the
+ # map represents a ConfigMap to be injected, with the key representing the
+ # name of the ConfigMap. The value is also a map, with each entry
+ # corresponding to an entry in the ConfigMap, with the key corresponding to
+ # the ConfigMap entry key and the value corresponding to the environment
+ # variable name.
configmaps_as_env_vars = {}
- # Kubernetes ConfigMaps to be injected into the container as volume mounts. Each
- # entry in the map represents a ConfigMap to be mounted, with the key representing
- # the name of the ConfigMap and the value as a map containing required mountPath
- # (file path on the container to mount the ConfigMap to) and optional subPath
- # (sub-path inside the referenced volume).
+ # Kubernetes ConfigMaps to be injected into the container as volume mounts.
+ # Each entry in the map represents a ConfigMap to be mounted, with the key
+ # representing the name of the ConfigMap and the value as a map containing
+ # required mountPath (file path on the container to mount the ConfigMap to)
+ # and optional subPath (sub-path inside the referenced volume).
configmaps_as_volumes = {}
- # The protocol on which this service's Docker container accepts traffic. Must be
- # one of the supported protocols:
- # https://kubernetes.io/docs/concepts/services-networking/service/#protocol-suppor
- # .
+ # The protocol on which this service's Docker container accepts traffic. Must
+ # be one of the supported protocols:
+ # https://kubernetes.io/docs/concepts/services-networking/service/#protocol-support.
container_protocol = "TCP"
# The map that lets you define Kubernetes resources you want installed and
# configured as part of the chart.
custom_resources = {}
- # The number of canary Pods to run on the Kubernetes cluster for this service. If
- # greater than 0, you must provide var.canary_image.
+ # The number of canary Pods to run on the Kubernetes cluster for this service.
+ # If greater than 0, you must provide var.canary_image.
desired_number_of_canary_pods = 0
- # The domain name for the DNS A record to bind to the Ingress resource for this
- # service (e.g. service.foo.com). Depending on your external-dns configuration,
- # this will also create the DNS record in the configured DNS service (e.g.,
- # Route53).
+ # The domain name for the DNS A record to bind to the Ingress resource for
+ # this service (e.g. service.foo.com). Depending on your external-dns
+ # configuration, this will also create the DNS record in the configured DNS
+ # service (e.g., Route53).
domain_name = null
- # The TTL value of the DNS A record that is bound to the Ingress resource. Only
- # used if var.domain_name is set and external-dns is deployed.
+ # The TTL value of the DNS A record that is bound to the Ingress resource.
+ # Only used if var.domain_name is set and external-dns is deployed.
domain_propagation_ttl = null
- # Configuration for using the IAM role with Service Accounts feature to provide
- # permissions to the applications. This expects a map with two properties:
- # `openid_connect_provider_arn` and `openid_connect_provider_url`. The
- # `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider for EKS
- # to retrieve IAM credentials, while `openid_connect_provider_url` is the URL.
- # Leave as an empty string if you do not wish to use IAM role with Service
- # Accounts.
+ # Configuration for using the IAM role with Service Accounts feature to
+ # provide permissions to the applications. This expects a map with two
+ # properties: `openid_connect_provider_arn` and `openid_connect_provider_url`.
+ # The `openid_connect_provider_arn` is the ARN of the OpenID Connect Provider
+ # for EKS to retrieve IAM credentials, while `openid_connect_provider_url` is
+ # the URL. Leave as an empty string if you do not wish to use IAM role with
+ # Service Accounts.
eks_iam_role_for_service_accounts_config = {"openid_connect_provider_arn":"","openid_connect_provider_url":""}
- # Whether or not to enable liveness probe. Liveness checks indicate whether or not
- # the container is alive. When these checks fail, the cluster will automatically
- # rotate the Pod.
+ # Whether or not to enable liveness probe. Liveness checks indicate whether or
+ # not the container is alive. When these checks fail, the cluster will
+ # automatically rotate the Pod.
enable_liveness_probe = false
- # Whether or not to enable readiness probe. Readiness checks indicate whether or
- # not the container can accept traffic. When these checks fail, the Pods are
- # automatically removed from the Service (and added back in when they pass).
+ # Whether or not to enable readiness probe. Readiness checks indicate whether
+ # or not the container can accept traffic. When these checks fail, the Pods
+ # are automatically removed from the Service (and added back in when they
+ # pass).
enable_readiness_probe = false
- # A map of environment variable name to environment variable value that should be
- # made available to the Docker container.
+ # A map of environment variable name to environment variable value that should
+ # be made available to the Docker container.
env_vars = {}
# How the service will be exposed in the cluster. Must be one of `external`
- # (accessible over the public Internet), `internal` (only accessible from within
- # the same VPC as the cluster), `cluster-internal` (only accessible within the
- # Kubernetes network), `none` (deploys as a headless service with no service IP).
+ # (accessible over the public Internet), `internal` (only accessible from
+ # within the same VPC as the cluster), `cluster-internal` (only accessible
+ # within the Kubernetes network), `none` (deploys as a headless service with
+ # no service IP).
expose_type = "cluster-internal"
# A boolean that indicates whether the access logs bucket should be destroyed,
- # even if there are files in it, when you run Terraform destroy. Unless you are
- # using this bucket only for test purposes, you'll want to leave this variable set
- # to false.
+ # even if there are files in it, when you run Terraform destroy. Unless you
+ # are using this bucket only for test purposes, you'll want to leave this
+ # variable set to false.
force_destroy_ingress_access_logs = false
# The version of the k8s-service helm chart to deploy.
helm_chart_version = "v0.2.18"
# Configure the Horizontal Pod Autoscaler (HPA) information for the associated
- # Deployment. HPA is disabled when this variable is set to null. Note that to use
- # an HPA, you must have a corresponding service deployed to your cluster that
- # exports the metrics (e.g., metrics-server
+ # Deployment. HPA is disabled when this variable is set to null. Note that to
+ # use an HPA, you must have a corresponding service deployed to your cluster
+ # that exports the metrics (e.g., metrics-server
# https://github.com/kubernetes-sigs/metrics-server).
horizontal_pod_autoscaler = null
- # An object defining the policy to attach to `iam_role_name` if the IAM role is
- # going to be created. Accepts a map of objects, where the map keys are sids for
- # IAM policy statements, and the object fields are the resources, actions, and the
- # effect ("Allow" or "Deny") of the statement. Ignored if `iam_role_arn` is
- # provided. Leave as null if you do not wish to use IAM role with Service
- # Accounts.
+ # An object defining the policy to attach to `iam_role_name` if the IAM role
+ # is going to be created. Accepts a map of objects, where the map keys are
+ # sids for IAM policy statements, and the object fields are the resources,
+ # actions, and the effect ("Allow" or "Deny") of the statement. Ignored if
+ # `iam_role_arn` is provided. Leave as null if you do not wish to use IAM role
+ # with Service Accounts.
iam_policy = null
# Whether or not the IAM role passed in `iam_role_name` already exists. Set to
# true if it exists, or false if it needs to be created. Defaults to false.
iam_role_exists = false
- # The name of an IAM role that will be used by the pod to access the AWS API. If
- # `iam_role_exists` is set to false, this role will be created. Leave as an empty
- # string if you do not wish to use IAM role with Service Accounts.
+ # The name of an IAM role that will be used by the pod to access the AWS API.
+ # If `iam_role_exists` is set to false, this role will be created. Leave as an
+ # empty string if you do not wish to use IAM role with Service Accounts.
iam_role_name = ""
# Set to true if the S3 bucket to store the Ingress access logs is managed
# external to this module.
ingress_access_logs_s3_bucket_already_exists = false
- # The name to use for the S3 bucket where the Ingress access logs will be stored.
- # If you leave this blank, a name will be generated automatically based on
- # var.application_name.
+ # The name to use for the S3 bucket where the Ingress access logs will be
+ # stored. If you leave this blank, a name will be generated automatically
+ # based on var.application_name.
ingress_access_logs_s3_bucket_name = ""
- # The prefix to use for ingress access logs associated with the ALB. All logs will
- # be stored in a key with this prefix. If null, the application name will be used.
+ # The prefix to use for ingress access logs associated with the ALB. All logs
+ # will be stored in a key with this prefix. If null, the application name will
+ # be used.
ingress_access_logs_s3_prefix = null
# A list of custom ingress annotations, such as health checks and TLS
# certificates, to add to the Helm chart. See:
- # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/guide/ingres
- # /annotations/
+ # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/guide/ingress/annotations/
ingress_annotations = {}
- # The protocol used by the Ingress ALB resource to communicate with the Service.
- # Must be one of HTTP or HTTPS.
+ # The protocol used by the Ingress ALB resource to communicate with the
+ # Service. Must be one of HTTP or HTTPS.
ingress_backend_protocol = "HTTP"
- # When true, HTTP requests will automatically be redirected to use SSL (HTTPS).
- # Used only when expose_type is either external or internal.
+ # When true, HTTP requests will automatically be redirected to use SSL
+ # (HTTPS). Used only when expose_type is either external or internal.
ingress_configure_ssl_redirect = true
- # Assign the ingress resource to an IngressGroup. All Ingress rules of the group
- # will be collapsed to a single ALB. The rules will be collapsed in priority
- # order, with lower numbers being evaluated first.
+ # Assign the ingress resource to an IngressGroup. All Ingress rules of the
+ # group will be collapsed to a single ALB. The rules will be collapsed in
+ # priority order, with lower numbers being evaluated first.
ingress_group = null
# A list of maps of protocols and ports that the ALB should listen on.
ingress_listener_protocol_ports = [{"port":80,"protocol":"HTTP"},{"port":443,"protocol":"HTTPS"}]
# Path prefix that should be matched to route to the service. For Kubernetes
- # Versions <1.19, Use /* to match all paths. For Kubernetes Versions >=1.19, use /
- # with ingress_path_type set to Prefix to match all paths.
+ # Versions <1.19, Use /* to match all paths. For Kubernetes Versions >=1.19,
+ # use / with ingress_path_type set to Prefix to match all paths.
ingress_path = "/"
# The path type to use for the ingress rule. Refer to
- # https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types for
- # more information.
+ # https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types
+ # for more information.
ingress_path_type = "Prefix"
# Set to true if the Ingress SSL redirect rule is managed externally. This is
# useful when configuring Ingress grouping and you only want one service to be
- # managing the SSL redirect rules. Only used if ingress_configure_ssl_redirect is
- # true.
+ # managing the SSL redirect rules. Only used if ingress_configure_ssl_redirect
+ # is true.
ingress_ssl_redirect_rule_already_exists = false
- # Whether or not the redirect rule requires setting path type. Set to true when
- # deploying to Kubernetes clusters with version >=1.19. Only used if
+ # Whether or not the redirect rule requires setting path type. Set to true
+ # when deploying to Kubernetes clusters with version >=1.19. Only used if
# ingress_configure_ssl_redirect is true.
ingress_ssl_redirect_rule_requires_path_type = true
- # Controls how the ALB routes traffic to the Pods. Supports 'instance' mode (route
- # traffic to NodePort and load balance across all worker nodes, relying on
- # Kubernetes Service networking to route to the pods), or 'ip' mode (route traffic
- # directly to the pod IP - only works with AWS VPC CNI). Must be set to 'ip' if
- # using Fargate. Only used if expose_type is not cluster-internal.
+ # Controls how the ALB routes traffic to the Pods. Supports 'instance' mode
+ # (route traffic to NodePort and load balance across all worker nodes, relying
+ # on Kubernetes Service networking to route to the pods), or 'ip' mode (route
+ # traffic directly to the pod IP - only works with AWS VPC CNI). Must be set
+ # to 'ip' if using Fargate. Only used if expose_type is not cluster-internal.
ingress_target_type = "instance"
# Seconds to wait after Pod creation before liveness probe has any effect. Any
@@ -746,34 +754,35 @@ inputs = {
# URL path for the endpoint that the liveness probe should ping.
liveness_probe_path = "/"
- # Port that the liveness probe should use to connect to the application container.
+ # Port that the liveness probe should use to connect to the application
+ # container.
liveness_probe_port = 80
- # Protocol (HTTP or HTTPS) that the liveness probe should use to connect to the
- # application container.
+ # Protocol (HTTP or HTTPS) that the liveness probe should use to connect to
+ # the application container.
liveness_probe_protocol = "HTTP"
- # The minimum number of pods that should be available at any given point in time.
- # This is used to configure a PodDisruptionBudget for the service, allowing you to
- # achieve a graceful rollout. See
- # https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-podd
- # sruptionbudgets-ef6a4baa5085 for an introduction to PodDisruptionBudgets.
+ # The minimum number of pods that should be available at any given point in
+ # time. This is used to configure a PodDisruptionBudget for the service,
+ # allowing you to achieve a graceful rollout. See
+ # https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-poddisruptionbudgets-ef6a4baa5085
+ # for an introduction to PodDisruptionBudgets.
min_number_of_pods_available = 0
- # After this number of days, Ingress log files should be transitioned from S3 to
- # Glacier. Set to 0 to never archive logs.
+ # After this number of days, Ingress log files should be transitioned from S3
+ # to Glacier. Set to 0 to never archive logs.
num_days_after_which_archive_ingress_log_data = 0
- # After this number of days, Ingress log files should be deleted from S3. Set to 0
- # to never delete logs.
+ # After this number of days, Ingress log files should be deleted from S3. Set
+ # to 0 to never delete logs.
num_days_after_which_delete_ingress_log_data = 0
- # Override any computed chart inputs with this map. This map is shallow merged to
- # the computed chart inputs prior to passing on to the Helm Release. This is
- # provided as a workaround while the terraform module does not support a
- # particular input value that is exposed in the underlying chart. Please always
- # file a GitHub issue to request exposing additional underlying input values prior
- # to using this variable.
+ # Override any computed chart inputs with this map. This map is shallow merged
+ # to the computed chart inputs prior to passing on to the Helm Release. This
+ # is provided as a workaround while the terraform module does not support a
+ # particular input value that is exposed in the underlying chart. Please
+ # always file a GitHub issue to request exposing additional underlying input
+ # values prior to using this variable.
override_chart_inputs = {}
# Seconds to wait after Pod creation before liveness probe has any effect. Any
@@ -791,39 +800,39 @@ inputs = {
# container.
readiness_probe_port = 80
- # Protocol (HTTP or HTTPS) that the readiness probe should use to connect to the
- # application container.
+ # Protocol (HTTP or HTTPS) that the readiness probe should use to connect to
+ # the application container.
readiness_probe_protocol = "HTTP"
# Paths that should be allocated as tmpfs volumes in the Deployment container.
- # Each entry in the map is a key value pair where the key is an arbitrary name to
- # bind to the volume, and the value is the path in the container to mount the
- # tmpfs volume.
+ # Each entry in the map is a key value pair where the key is an arbitrary name
+ # to bind to the volume, and the value is the path in the container to mount
+ # the tmpfs volume.
scratch_paths = {}
# Kubernetes Secrets to be injected into the container. Each entry in the map
- # represents a Secret to be injected, with the key representing the name of the
- # Secret. The value is also a map, with each entry corresponding to an entry in
- # the Secret, with the key corresponding to the Secret entry key and the value
- # corresponding to the environment variable name.
+ # represents a Secret to be injected, with the key representing the name of
+ # the Secret. The value is also a map, with each entry corresponding to an
+ # entry in the Secret, with the key corresponding to the Secret entry key and
+ # the value corresponding to the environment variable name.
secrets_as_env_vars = {}
# Kubernetes Secrets to be injected into the container as volume mounts. Each
- # entry in the map represents a Secret to be mounted, with the key representing
- # the name of the Secret and the value as a map containing required mountPath
- # (file path on the container to mount the Secret to) and optional subPath
- # (sub-path inside the referenced volume).
+ # entry in the map represents a Secret to be mounted, with the key
+ # representing the name of the Secret and the value as a map containing
+ # required mountPath (file path on the container to mount the Secret to) and
+ # optional subPath (sub-path inside the referenced volume).
secrets_as_volumes = {}
- # When true, and service_account_name is not blank, lookup and assign an existing
- # ServiceAccount in the Namespace to the Pods.
+ # When true, and service_account_name is not blank, lookup and assign an
+ # existing ServiceAccount in the Namespace to the Pods.
service_account_exists = false
# The name of a service account to create for use with the Pods. This service
- # account will be mapped to the IAM role defined in `var.iam_role_name` to give
- # the pod permissions to access the AWS API. Must be unique in this namespace.
- # Leave as an empty string if you do not wish to assign a Service Account to the
- # Pods.
+ # account will be mapped to the IAM role defined in `var.iam_role_name` to
+ # give the pod permissions to access the AWS API. Must be unique in this
+ # namespace. Leave as an empty string if you do not wish to assign a Service
+ # Account to the Pods.
service_account_name = ""
# The port to expose on the Service. This is most useful when addressing the
@@ -831,26 +840,27 @@ inputs = {
# Ingress resource.
service_port = 80
- # Map of keys to container definitions that allow you to manage additional side
- # car containers that should be included in the Pod. Note that the values are
- # injected directly into the container list for the Pod Spec.
+ # Map of keys to container definitions that allow you to manage additional
+ # side car containers that should be included in the Pod. Note that the values
+ # are injected directly into the container list for the Pod Spec.
sidecar_containers = {}
- # Grace period in seconds that Kubernetes will wait before terminating the pod.
- # The timeout happens in parallel to preStop hook and the SIGTERM signal,
+ # Grace period in seconds that Kubernetes will wait before terminating the
+ # pod. The timeout happens in parallel to preStop hook and the SIGTERM signal,
# Kubernetes does not wait for preStop to finish before beginning the grace
# period.
termination_grace_period_seconds = null
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# A local file path where the helm chart values will be emitted. Use to debug
- # issues with the helm chart values. Set to null to prevent creation of the file.
+ # issues with the helm chart values. Set to null to prevent creation of the
+ # file.
values_file_path = null
# When true, wait until Pods are up and healthy or wait_timeout seconds before
@@ -1943,11 +1953,11 @@ Number of seconds to wait for Pods to become healthy before marking the deployme
diff --git a/docs/reference/services/app-orchestration/lambda.md b/docs/reference/services/app-orchestration/lambda.md
index e75483441d..13191c6ce7 100644
--- a/docs/reference/services/app-orchestration/lambda.md
+++ b/docs/reference/services/app-orchestration/lambda.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Lambda
-View Source
+View Source
Release Notes
@@ -59,9 +59,9 @@ documentation in the [terraform-aws-lambda](https://github.com/gruntwork-io/terr
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -69,7 +69,7 @@ documentation in the [terraform-aws-lambda](https://github.com/gruntwork-io/terr
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -77,7 +77,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -101,44 +101,44 @@ If you want to deploy this repo in production, check out the following resources
module "lambda" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/lambda?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/lambda?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of SNS topic ARNs to notify when the lambda alarms change to ALARM, OK,
- # or INSUFFICIENT_DATA state
+ # A list of SNS topic ARNs to notify when the lambda alarms change to ALARM,
+ # OK, or INSUFFICIENT_DATA state
alarm_sns_topic_arns =
- # The maximum amount of memory, in MB, your Lambda function will be able to use at
- # runtime. Can be set in 64MB increments from 128MB up to 1536MB. Note that the
- # amount of CPU power given to a Lambda function is proportional to the amount of
- # memory you request, so a Lambda function with 256MB of memory has twice as much
- # CPU power as one with 128MB.
+ # The maximum amount of memory, in MB, your Lambda function will be able to
+ # use at runtime. Can be set in 64MB increments from 128MB up to 1536MB. Note
+ # that the amount of CPU power given to a Lambda function is proportional to
+ # the amount of memory you request, so a Lambda function with 256MB of memory
+ # has twice as much CPU power as one with 128MB.
memory_size =
- # The name of the Lambda function. Used to namespace all resources created by this
- # module.
+ # The name of the Lambda function. Used to namespace all resources created by
+ # this module.
name =
- # The maximum amount of time, in seconds, your Lambda function will be allowed to
- # run. Must be between 1 and 900 seconds.
+ # The maximum amount of time, in seconds, your Lambda function will be allowed
+ # to run. Must be between 1 and 900 seconds.
timeout =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of Security Group IDs that should be attached to the Lambda function when
- # running in a VPC. Only used if var.run_in_vpc is true.
+ # A list of Security Group IDs that should be attached to the Lambda function
+ # when running in a VPC. Only used if var.run_in_vpc is true.
additional_security_group_ids = []
- # A custom assume role policy for the IAM role for this Lambda function. If not
- # set, the default is a policy that allows the Lambda service to assume the IAM
- # role, which is what most users will need. However, you can use this variable to
- # override the policy for special cases, such as using a Lambda function to rotate
- # AWS Secrets Manager secrets.
+ # A custom assume role policy for the IAM role for this Lambda function. If
+ # not set, the default is a policy that allows the Lambda service to assume
+ # the IAM role, which is what most users will need. However, you can use this
+ # variable to override the policy for special cases, such as using a Lambda
+ # function to rotate AWS Secrets Manager secrets.
assume_role_policy = null
# The ID (ARN, alias ARN, AWS ID) of a customer managed KMS Key to use for
@@ -146,23 +146,22 @@ module "lambda" {
cloudwatch_log_group_kms_key_id = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
cloudwatch_log_group_retention_in_days = null
- # The ARN of the destination to deliver matching log events to. Kinesis stream or
- # Lambda function ARN. Only applicable if var.should_create_cloudwatch_log_group
- # is true.
+ # The ARN of the destination to deliver matching log events to. Kinesis stream
+ # or Lambda function ARN. Only applicable if
+ # var.should_create_cloudwatch_log_group is true.
cloudwatch_log_group_subscription_destination_arn = null
- # The method used to distribute log data to the destination. Only applicable when
- # var.cloudwatch_log_group_subscription_destination_arn is a kinesis stream. Valid
- # values are `Random` and `ByLogStream`.
+ # The method used to distribute log data to the destination. Only applicable
+ # when var.cloudwatch_log_group_subscription_destination_arn is a kinesis
+ # stream. Valid values are `Random` and `ByLogStream`.
cloudwatch_log_group_subscription_distribution = null
- # A valid CloudWatch Logs filter pattern for subscribing to a filtered stream of
- # log events.
+ # A valid CloudWatch Logs filter pattern for subscribing to a filtered stream
+ # of log events.
cloudwatch_log_group_subscription_filter_pattern = ""
# ARN of an IAM role that grants Amazon CloudWatch Logs permissions to deliver
@@ -170,8 +169,8 @@ module "lambda" {
# var.cloudwatch_log_group_subscription_destination_arn is a kinesis stream.
cloudwatch_log_group_subscription_role_arn = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values.
cloudwatch_log_group_tags = null
# The CMD for the docker image. Only used if you specify a Docker image via
@@ -179,69 +178,71 @@ module "lambda" {
command = []
# The arithmetic operation to use when comparing the specified Statistic and
- # Threshold. The specified Statistic value is used as the first operand. Either of
- # the following is supported: `GreaterThanOrEqualToThreshold`,
+ # Threshold. The specified Statistic value is used as the first operand.
+ # Either of the following is supported: `GreaterThanOrEqualToThreshold`,
# `GreaterThanThreshold`, `LessThanThreshold`, `LessThanOrEqualToThreshold`.
# Additionally, the values `LessThanLowerOrGreaterThanUpperThreshold`,
# `LessThanLowerThreshold`, and `GreaterThanUpperThreshold` are used only for
# alarms based on anomaly detection models.
comparison_operator = "GreaterThanThreshold"
- # Set to false to have this module skip creating resources. This weird parameter
- # exists solely because Terraform does not support conditional modules. Therefore,
- # this is a hack to allow you to conditionally decide if this module should create
- # anything or not.
+ # Set to false to have this module skip creating resources. This weird
+ # parameter exists solely because Terraform does not support conditional
+ # modules. Therefore, this is a hack to allow you to conditionally decide if
+ # this module should create anything or not.
create_resources = true
# The number of datapoints that must be breaching to trigger the alarm.
datapoints_to_alarm = 1
- # The ARN of an SNS topic or an SQS queue to notify when invocation of a Lambda
- # function fails. If this option is used, you must grant this function's IAM role
- # (the ID is outputted as iam_role_id) access to write to the target object, which
- # means allowing either the sns:Publish or sqs:SendMessage action on this ARN,
- # depending on which service is targeted.
+ # The ARN of an SNS topic or an SQS queue to notify when invocation of a
+ # Lambda function fails. If this option is used, you must grant this
+ # function's IAM role (the ID is outputted as iam_role_id) access to write to
+ # the target object, which means allowing either the sns:Publish or
+ # sqs:SendMessage action on this ARN, depending on which service is targeted.
dead_letter_target_arn = null
# A description of what the Lambda function does.
description = null
- # Set to true to enable versioning for this Lambda function. This allows you to
- # use aliases to refer to execute different versions of the function in different
- # environments. Note that an alternative way to run Lambda functions in multiple
- # environments is to version your Terraform code.
+ # Set to true to enable versioning for this Lambda function. This allows you
+ # to use aliases to refer to execute different versions of the function in
+ # different environments. Note that an alternative way to run Lambda functions
+ # in multiple environments is to version your Terraform code.
enable_versioning = false
- # The ENTRYPOINT for the docker image. Only used if you specify a Docker image via
- # image_uri.
+ # The ENTRYPOINT for the docker image. Only used if you specify a Docker image
+ # via image_uri.
entry_point = []
# A map of environment variables to pass to the Lambda function. AWS will
- # automatically encrypt these with KMS and decrypt them when running the function.
+ # automatically encrypt these with KMS and decrypt them when running the
+ # function.
environment_variables = {"EnvVarPlaceHolder":"Placeholder"}
- # The number of periods over which data is compared to the specified threshold.
+ # The number of periods over which data is compared to the specified
+ # threshold.
evaluation_periods = 1
- # The ARN of an EFS access point to use to access the file system. Only used if
- # var.mount_to_file_system is true.
+ # The ARN of an EFS access point to use to access the file system. Only used
+ # if var.mount_to_file_system is true.
file_system_access_point_arn = null
- # The mount path where the lambda can access the file system. This path must begin
- # with /mnt/. Only used if var.mount_to_file_system is true.
+ # The mount path where the lambda can access the file system. This path must
+ # begin with /mnt/. Only used if var.mount_to_file_system is true.
file_system_mount_path = null
- # The function entrypoint in your code. This is typically the name of a function
- # or method in your code that AWS will execute when this Lambda function is
- # triggered.
+ # The function entrypoint in your code. This is typically the name of a
+ # function or method in your code that AWS will execute when this Lambda
+ # function is triggered.
handler = null
- # An object defining the policy to attach to `iam_role_name` if the IAM role is
- # going to be created. Accepts a map of objects, where the map keys are sids for
- # IAM policy statements, and the object fields are the resources, actions, and the
- # effect ("Allow" or "Deny") of the statement. Ignored if `iam_role_arn` is
- # provided. Leave as null if you do not wish to use IAM role with Service
- # Accounts.
+ # An object defining the policy to attach to `iam_role_name` if the IAM role
+ # is going to be created. Accepts a map of objects, where the map keys are
+ # sids for IAM policy statements, and the object fields are the resources,
+ # actions, and the effect ("Allow" or "Deny") of the statement. Ignored if
+ # `iam_role_arn` is provided. Leave as null if you do not wish to use IAM role
+ # with Service Accounts.
iam_policy = null
# The ECR image URI containing the function's deployment package. Example:
@@ -253,20 +254,20 @@ module "lambda" {
# account.
kms_key_arn = null
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role for the lambda
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role for the lambda
lambda_role_permissions_boundary_arn = null
- # The list of Lambda Layer Version ARNs to attach to your Lambda Function. You can
- # have a maximum of 5 Layers attached to each function.
+ # The list of Lambda Layer Version ARNs to attach to your Lambda Function. You
+ # can have a maximum of 5 Layers attached to each function.
layers = []
# The name for the alarm's associated metric.
metric_name = "Errors"
- # Set to true to mount your Lambda function on an EFS. Note that the lambda must
- # also be deployed inside a VPC (run_in_vpc must be set to true) for this config
- # to have any effect.
+ # Set to true to mount your Lambda function on an EFS. Note that the lambda
+ # must also be deployed inside a VPC (run_in_vpc must be set to true) for this
+ # config to have any effect.
mount_to_file_system = false
# The namespace to use for all resources created by this module. If not set,
@@ -276,99 +277,102 @@ module "lambda" {
# The period in seconds over which the specified `statistic` is applied.
period = 60
- # The amount of reserved concurrent executions for this lambda function or -1 if
- # unreserved.
+ # The amount of reserved concurrent executions for this lambda function or -1
+ # if unreserved.
reserved_concurrent_executions = null
# Set to true to give your Lambda function access to resources within a VPC.
run_in_vpc = false
- # The runtime environment for the Lambda function (e.g. nodejs, python2.7, java8).
- # See
- # https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateF
- # nction-request-Runtime for all possible values.
+ # The runtime environment for the Lambda function (e.g. nodejs, python2.7,
+ # java8). See
+ # https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime
+ # for all possible values.
runtime = null
- # An S3 bucket location containing the function's deployment package. Exactly one
- # of var.source_path or the var.s3_xxx variables must be specified.
+ # An S3 bucket location containing the function's deployment package. Exactly
+ # one of var.source_path or the var.s3_xxx variables must be specified.
s3_bucket = null
- # The path within var.s3_bucket where the deployment package is located. Exactly
- # one of var.source_path or the var.s3_xxx variables must be specified.
+ # The path within var.s3_bucket where the deployment package is located.
+ # Exactly one of var.source_path or the var.s3_xxx variables must be
+ # specified.
s3_key = null
- # The version of the path in var.s3_key to use as the deployment package. Exactly
- # one of var.source_path or the var.s3_xxx variables must be specified.
+ # The version of the path in var.s3_key to use as the deployment package.
+ # Exactly one of var.source_path or the var.s3_xxx variables must be
+ # specified.
s3_object_version = null
- # An expression that defines the schedule for this lambda job. For example, cron(0
- # 20 * * ? *) or rate(5 minutes). For more information visit
- # https://docs.aws.amazon.com/lambda/latest/dg/services-cloudwatchevents-expressio
- # s.html
+ # An expression that defines the schedule for this lambda job. For example,
+ # cron(0 20 * * ? *) or rate(5 minutes). For more information visit
+ # https://docs.aws.amazon.com/lambda/latest/dg/services-cloudwatchevents-expressions.html
schedule_expression = null
# If set to false, this function will no longer set the source_code_hash
# parameter, so this module will no longer detect and upload changes to the
- # deployment package. This is primarily useful if you update the Lambda function
- # from outside of this module (e.g., you have scripts that do it separately) and
- # want to avoid a plan diff. Used only if var.source_path is non-empty.
+ # deployment package. This is primarily useful if you update the Lambda
+ # function from outside of this module (e.g., you have scripts that do it
+ # separately) and want to avoid a plan diff. Used only if var.source_path is
+ # non-empty.
set_source_code_hash = true
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the lambda function execution. This is useful if you wish to customize the
- # CloudWatch Log Group with various settings such as retention periods and KMS
- # encryption. When false, AWS Lambda will automatically create a basic log group
- # to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the lambda function execution. This is useful if you wish to customize
+ # the CloudWatch Log Group with various settings such as retention periods and
+ # KMS encryption. When false, AWS Lambda will automatically create a basic log
+ # group to use.
should_create_cloudwatch_log_group = true
# If true, create an egress rule allowing all outbound traffic from Lambda
# function to the entire Internet (e.g. 0.0.0.0/0).
should_create_outbound_rule = false
- # Set to true to skip zip archive creation and assume that var.source_path points
- # to a pregenerated zip archive.
+ # Set to true to skip zip archive creation and assume that var.source_path
+ # points to a pregenerated zip archive.
skip_zip = false
- # The path to the directory that contains your Lambda function source code. This
- # code will be zipped up and uploaded to Lambda as your deployment package. If
- # var.skip_zip is set to true, then this is assumed to be the path to an
- # already-zipped file, and it will be uploaded directly to Lambda as a deployment
- # package. Exactly one of var.source_path or the var.s3_xxx variables must be
- # specified.
+ # The path to the directory that contains your Lambda function source code.
+ # This code will be zipped up and uploaded to Lambda as your deployment
+ # package. If var.skip_zip is set to true, then this is assumed to be the path
+ # to an already-zipped file, and it will be uploaded directly to Lambda as a
+ # deployment package. Exactly one of var.source_path or the var.s3_xxx
+ # variables must be specified.
source_path = null
# The statistic to apply to the alarm's associated metric.
statistic = "Sum"
- # A list of subnet IDs the Lambda function should be able to access within your
- # VPC. Only used if var.run_in_vpc is true.
+ # A list of subnet IDs the Lambda function should be able to access within
+ # your VPC. Only used if var.run_in_vpc is true.
subnet_ids = []
# A map of tags to apply to the Lambda function.
tags = {}
- # The value against which the specified statistic is compared. This parameter is
- # required for alarms based on static thresholds, but should not be used for
- # alarms based on anomaly detection models.
+ # The value against which the specified statistic is compared. This parameter
+ # is required for alarms based on static thresholds, but should not be used
+ # for alarms based on anomaly detection models.
threshold = 0
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# The ID of the VPC the Lambda function should be able to access. Only used if
# var.run_in_vpc is true.
vpc_id = null
- # The working directory for the docker image. Only used if you specify a Docker
- # image via image_uri.
+ # The working directory for the docker image. Only used if you specify a
+ # Docker image via image_uri.
working_directory = null
- # The path to store the output zip file of your source code. If empty, defaults to
- # module path. This should be the full path to the zip file, not a directory.
+ # The path to store the output zip file of your source code. If empty,
+ # defaults to module path. This should be the full path to the zip file, not a
+ # directory.
zip_output_path = null
}
@@ -386,7 +390,7 @@ module "lambda" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/lambda?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/lambda?ref=v0.104.12"
}
inputs = {
@@ -395,38 +399,38 @@ inputs = {
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of SNS topic ARNs to notify when the lambda alarms change to ALARM, OK,
- # or INSUFFICIENT_DATA state
+ # A list of SNS topic ARNs to notify when the lambda alarms change to ALARM,
+ # OK, or INSUFFICIENT_DATA state
alarm_sns_topic_arns =
- # The maximum amount of memory, in MB, your Lambda function will be able to use at
- # runtime. Can be set in 64MB increments from 128MB up to 1536MB. Note that the
- # amount of CPU power given to a Lambda function is proportional to the amount of
- # memory you request, so a Lambda function with 256MB of memory has twice as much
- # CPU power as one with 128MB.
+ # The maximum amount of memory, in MB, your Lambda function will be able to
+ # use at runtime. Can be set in 64MB increments from 128MB up to 1536MB. Note
+ # that the amount of CPU power given to a Lambda function is proportional to
+ # the amount of memory you request, so a Lambda function with 256MB of memory
+ # has twice as much CPU power as one with 128MB.
memory_size =
- # The name of the Lambda function. Used to namespace all resources created by this
- # module.
+ # The name of the Lambda function. Used to namespace all resources created by
+ # this module.
name =
- # The maximum amount of time, in seconds, your Lambda function will be allowed to
- # run. Must be between 1 and 900 seconds.
+ # The maximum amount of time, in seconds, your Lambda function will be allowed
+ # to run. Must be between 1 and 900 seconds.
timeout =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of Security Group IDs that should be attached to the Lambda function when
- # running in a VPC. Only used if var.run_in_vpc is true.
+ # A list of Security Group IDs that should be attached to the Lambda function
+ # when running in a VPC. Only used if var.run_in_vpc is true.
additional_security_group_ids = []
- # A custom assume role policy for the IAM role for this Lambda function. If not
- # set, the default is a policy that allows the Lambda service to assume the IAM
- # role, which is what most users will need. However, you can use this variable to
- # override the policy for special cases, such as using a Lambda function to rotate
- # AWS Secrets Manager secrets.
+ # A custom assume role policy for the IAM role for this Lambda function. If
+ # not set, the default is a policy that allows the Lambda service to assume
+ # the IAM role, which is what most users will need. However, you can use this
+ # variable to override the policy for special cases, such as using a Lambda
+ # function to rotate AWS Secrets Manager secrets.
assume_role_policy = null
# The ID (ARN, alias ARN, AWS ID) of a customer managed KMS Key to use for
@@ -434,23 +438,22 @@ inputs = {
cloudwatch_log_group_kms_key_id = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
cloudwatch_log_group_retention_in_days = null
- # The ARN of the destination to deliver matching log events to. Kinesis stream or
- # Lambda function ARN. Only applicable if var.should_create_cloudwatch_log_group
- # is true.
+ # The ARN of the destination to deliver matching log events to. Kinesis stream
+ # or Lambda function ARN. Only applicable if
+ # var.should_create_cloudwatch_log_group is true.
cloudwatch_log_group_subscription_destination_arn = null
- # The method used to distribute log data to the destination. Only applicable when
- # var.cloudwatch_log_group_subscription_destination_arn is a kinesis stream. Valid
- # values are `Random` and `ByLogStream`.
+ # The method used to distribute log data to the destination. Only applicable
+ # when var.cloudwatch_log_group_subscription_destination_arn is a kinesis
+ # stream. Valid values are `Random` and `ByLogStream`.
cloudwatch_log_group_subscription_distribution = null
- # A valid CloudWatch Logs filter pattern for subscribing to a filtered stream of
- # log events.
+ # A valid CloudWatch Logs filter pattern for subscribing to a filtered stream
+ # of log events.
cloudwatch_log_group_subscription_filter_pattern = ""
# ARN of an IAM role that grants Amazon CloudWatch Logs permissions to deliver
@@ -458,8 +461,8 @@ inputs = {
# var.cloudwatch_log_group_subscription_destination_arn is a kinesis stream.
cloudwatch_log_group_subscription_role_arn = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values.
cloudwatch_log_group_tags = null
# The CMD for the docker image. Only used if you specify a Docker image via
@@ -467,69 +470,71 @@ inputs = {
command = []
# The arithmetic operation to use when comparing the specified Statistic and
- # Threshold. The specified Statistic value is used as the first operand. Either of
- # the following is supported: `GreaterThanOrEqualToThreshold`,
+ # Threshold. The specified Statistic value is used as the first operand.
+ # Either of the following is supported: `GreaterThanOrEqualToThreshold`,
# `GreaterThanThreshold`, `LessThanThreshold`, `LessThanOrEqualToThreshold`.
# Additionally, the values `LessThanLowerOrGreaterThanUpperThreshold`,
# `LessThanLowerThreshold`, and `GreaterThanUpperThreshold` are used only for
# alarms based on anomaly detection models.
comparison_operator = "GreaterThanThreshold"
- # Set to false to have this module skip creating resources. This weird parameter
- # exists solely because Terraform does not support conditional modules. Therefore,
- # this is a hack to allow you to conditionally decide if this module should create
- # anything or not.
+ # Set to false to have this module skip creating resources. This weird
+ # parameter exists solely because Terraform does not support conditional
+ # modules. Therefore, this is a hack to allow you to conditionally decide if
+ # this module should create anything or not.
create_resources = true
# The number of datapoints that must be breaching to trigger the alarm.
datapoints_to_alarm = 1
- # The ARN of an SNS topic or an SQS queue to notify when invocation of a Lambda
- # function fails. If this option is used, you must grant this function's IAM role
- # (the ID is outputted as iam_role_id) access to write to the target object, which
- # means allowing either the sns:Publish or sqs:SendMessage action on this ARN,
- # depending on which service is targeted.
+ # The ARN of an SNS topic or an SQS queue to notify when invocation of a
+ # Lambda function fails. If this option is used, you must grant this
+ # function's IAM role (the ID is outputted as iam_role_id) access to write to
+ # the target object, which means allowing either the sns:Publish or
+ # sqs:SendMessage action on this ARN, depending on which service is targeted.
dead_letter_target_arn = null
# A description of what the Lambda function does.
description = null
- # Set to true to enable versioning for this Lambda function. This allows you to
- # use aliases to refer to execute different versions of the function in different
- # environments. Note that an alternative way to run Lambda functions in multiple
- # environments is to version your Terraform code.
+ # Set to true to enable versioning for this Lambda function. This allows you
+ # to use aliases to refer to execute different versions of the function in
+ # different environments. Note that an alternative way to run Lambda functions
+ # in multiple environments is to version your Terraform code.
enable_versioning = false
- # The ENTRYPOINT for the docker image. Only used if you specify a Docker image via
- # image_uri.
+ # The ENTRYPOINT for the docker image. Only used if you specify a Docker image
+ # via image_uri.
entry_point = []
# A map of environment variables to pass to the Lambda function. AWS will
- # automatically encrypt these with KMS and decrypt them when running the function.
+ # automatically encrypt these with KMS and decrypt them when running the
+ # function.
environment_variables = {"EnvVarPlaceHolder":"Placeholder"}
- # The number of periods over which data is compared to the specified threshold.
+ # The number of periods over which data is compared to the specified
+ # threshold.
evaluation_periods = 1
- # The ARN of an EFS access point to use to access the file system. Only used if
- # var.mount_to_file_system is true.
+ # The ARN of an EFS access point to use to access the file system. Only used
+ # if var.mount_to_file_system is true.
file_system_access_point_arn = null
- # The mount path where the lambda can access the file system. This path must begin
- # with /mnt/. Only used if var.mount_to_file_system is true.
+ # The mount path where the lambda can access the file system. This path must
+ # begin with /mnt/. Only used if var.mount_to_file_system is true.
file_system_mount_path = null
- # The function entrypoint in your code. This is typically the name of a function
- # or method in your code that AWS will execute when this Lambda function is
- # triggered.
+ # The function entrypoint in your code. This is typically the name of a
+ # function or method in your code that AWS will execute when this Lambda
+ # function is triggered.
handler = null
- # An object defining the policy to attach to `iam_role_name` if the IAM role is
- # going to be created. Accepts a map of objects, where the map keys are sids for
- # IAM policy statements, and the object fields are the resources, actions, and the
- # effect ("Allow" or "Deny") of the statement. Ignored if `iam_role_arn` is
- # provided. Leave as null if you do not wish to use IAM role with Service
- # Accounts.
+ # An object defining the policy to attach to `iam_role_name` if the IAM role
+ # is going to be created. Accepts a map of objects, where the map keys are
+ # sids for IAM policy statements, and the object fields are the resources,
+ # actions, and the effect ("Allow" or "Deny") of the statement. Ignored if
+ # `iam_role_arn` is provided. Leave as null if you do not wish to use IAM role
+ # with Service Accounts.
iam_policy = null
# The ECR image URI containing the function's deployment package. Example:
@@ -541,20 +546,20 @@ inputs = {
# account.
kms_key_arn = null
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role for the lambda
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role for the lambda
lambda_role_permissions_boundary_arn = null
- # The list of Lambda Layer Version ARNs to attach to your Lambda Function. You can
- # have a maximum of 5 Layers attached to each function.
+ # The list of Lambda Layer Version ARNs to attach to your Lambda Function. You
+ # can have a maximum of 5 Layers attached to each function.
layers = []
# The name for the alarm's associated metric.
metric_name = "Errors"
- # Set to true to mount your Lambda function on an EFS. Note that the lambda must
- # also be deployed inside a VPC (run_in_vpc must be set to true) for this config
- # to have any effect.
+ # Set to true to mount your Lambda function on an EFS. Note that the lambda
+ # must also be deployed inside a VPC (run_in_vpc must be set to true) for this
+ # config to have any effect.
mount_to_file_system = false
# The namespace to use for all resources created by this module. If not set,
@@ -564,99 +569,102 @@ inputs = {
# The period in seconds over which the specified `statistic` is applied.
period = 60
- # The amount of reserved concurrent executions for this lambda function or -1 if
- # unreserved.
+ # The amount of reserved concurrent executions for this lambda function or -1
+ # if unreserved.
reserved_concurrent_executions = null
# Set to true to give your Lambda function access to resources within a VPC.
run_in_vpc = false
- # The runtime environment for the Lambda function (e.g. nodejs, python2.7, java8).
- # See
- # https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateF
- # nction-request-Runtime for all possible values.
+ # The runtime environment for the Lambda function (e.g. nodejs, python2.7,
+ # java8). See
+ # https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime
+ # for all possible values.
runtime = null
- # An S3 bucket location containing the function's deployment package. Exactly one
- # of var.source_path or the var.s3_xxx variables must be specified.
+ # An S3 bucket location containing the function's deployment package. Exactly
+ # one of var.source_path or the var.s3_xxx variables must be specified.
s3_bucket = null
- # The path within var.s3_bucket where the deployment package is located. Exactly
- # one of var.source_path or the var.s3_xxx variables must be specified.
+ # The path within var.s3_bucket where the deployment package is located.
+ # Exactly one of var.source_path or the var.s3_xxx variables must be
+ # specified.
s3_key = null
- # The version of the path in var.s3_key to use as the deployment package. Exactly
- # one of var.source_path or the var.s3_xxx variables must be specified.
+ # The version of the path in var.s3_key to use as the deployment package.
+ # Exactly one of var.source_path or the var.s3_xxx variables must be
+ # specified.
s3_object_version = null
- # An expression that defines the schedule for this lambda job. For example, cron(0
- # 20 * * ? *) or rate(5 minutes). For more information visit
- # https://docs.aws.amazon.com/lambda/latest/dg/services-cloudwatchevents-expressio
- # s.html
+ # An expression that defines the schedule for this lambda job. For example,
+ # cron(0 20 * * ? *) or rate(5 minutes). For more information visit
+ # https://docs.aws.amazon.com/lambda/latest/dg/services-cloudwatchevents-expressions.html
schedule_expression = null
# If set to false, this function will no longer set the source_code_hash
# parameter, so this module will no longer detect and upload changes to the
- # deployment package. This is primarily useful if you update the Lambda function
- # from outside of this module (e.g., you have scripts that do it separately) and
- # want to avoid a plan diff. Used only if var.source_path is non-empty.
+ # deployment package. This is primarily useful if you update the Lambda
+ # function from outside of this module (e.g., you have scripts that do it
+ # separately) and want to avoid a plan diff. Used only if var.source_path is
+ # non-empty.
set_source_code_hash = true
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the lambda function execution. This is useful if you wish to customize the
- # CloudWatch Log Group with various settings such as retention periods and KMS
- # encryption. When false, AWS Lambda will automatically create a basic log group
- # to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the lambda function execution. This is useful if you wish to customize
+ # the CloudWatch Log Group with various settings such as retention periods and
+ # KMS encryption. When false, AWS Lambda will automatically create a basic log
+ # group to use.
should_create_cloudwatch_log_group = true
# If true, create an egress rule allowing all outbound traffic from Lambda
# function to the entire Internet (e.g. 0.0.0.0/0).
should_create_outbound_rule = false
- # Set to true to skip zip archive creation and assume that var.source_path points
- # to a pregenerated zip archive.
+ # Set to true to skip zip archive creation and assume that var.source_path
+ # points to a pregenerated zip archive.
skip_zip = false
- # The path to the directory that contains your Lambda function source code. This
- # code will be zipped up and uploaded to Lambda as your deployment package. If
- # var.skip_zip is set to true, then this is assumed to be the path to an
- # already-zipped file, and it will be uploaded directly to Lambda as a deployment
- # package. Exactly one of var.source_path or the var.s3_xxx variables must be
- # specified.
+ # The path to the directory that contains your Lambda function source code.
+ # This code will be zipped up and uploaded to Lambda as your deployment
+ # package. If var.skip_zip is set to true, then this is assumed to be the path
+ # to an already-zipped file, and it will be uploaded directly to Lambda as a
+ # deployment package. Exactly one of var.source_path or the var.s3_xxx
+ # variables must be specified.
source_path = null
# The statistic to apply to the alarm's associated metric.
statistic = "Sum"
- # A list of subnet IDs the Lambda function should be able to access within your
- # VPC. Only used if var.run_in_vpc is true.
+ # A list of subnet IDs the Lambda function should be able to access within
+ # your VPC. Only used if var.run_in_vpc is true.
subnet_ids = []
# A map of tags to apply to the Lambda function.
tags = {}
- # The value against which the specified statistic is compared. This parameter is
- # required for alarms based on static thresholds, but should not be used for
- # alarms based on anomaly detection models.
+ # The value against which the specified statistic is compared. This parameter
+ # is required for alarms based on static thresholds, but should not be used
+ # for alarms based on anomaly detection models.
threshold = 0
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# The ID of the VPC the Lambda function should be able to access. Only used if
# var.run_in_vpc is true.
vpc_id = null
- # The working directory for the docker image. Only used if you specify a Docker
- # image via image_uri.
+ # The working directory for the docker image. Only used if you specify a
+ # Docker image via image_uri.
working_directory = null
- # The path to store the output zip file of your source code. If empty, defaults to
- # module path. This should be the full path to the zip file, not a directory.
+ # The path to store the output zip file of your source code. If empty,
+ # defaults to module path. This should be the full path to the zip file, not a
+ # directory.
zip_output_path = null
}
@@ -1363,11 +1371,11 @@ Latest published version of your Lambda Function
diff --git a/docs/reference/services/app-orchestration/public-static-website.md b/docs/reference/services/app-orchestration/public-static-website.md
index 49ce25b0f6..c97aec0ea4 100644
--- a/docs/reference/services/app-orchestration/public-static-website.md
+++ b/docs/reference/services/app-orchestration/public-static-website.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Public Static Website
-View Source
+View Source
Release Notes
@@ -60,7 +60,7 @@ If you’ve never used the Service Catalog before, make sure to read
### Core concepts
This module deploys a public website, so the S3 bucket and objects with it are readable by the public. It also is
-hosted in a Public Hosted Zone in Route 53. You may provide a `hosted_zone_id` in [variables](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/services/public-static-website/variables.tf),
+hosted in a Public Hosted Zone in Route 53. You may provide a `hosted_zone_id` in [variables](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/services/public-static-website/variables.tf),
or you may provide the `base_domain_name` associated with your Public Hosted Zone in Route 53, optionally along with
any tags that must match that zone in `base_domain_name_tags`. If you do the latter, this module will find the hosted
zone id for you.
@@ -71,17 +71,17 @@ website, and how to configure SSL, check out the documentation for the
and [s3-cloudfront](https://github.com/gruntwork-io/terraform-aws-static-assets/tree/master/modules/s3-cloudfront)
modules.
-* [Quick Start](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/services/public-static-website/core-concepts.md#quick-start)
+* [Quick Start](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/services/public-static-website/core-concepts.md#quick-start)
* [How to test the website](https://github.com/gruntwork-io/terraform-aws-static-assets/blob/master/modules/s3-static-website/core-concepts.md#how-to-test-the-website)
-* [How to configure HTTPS (SSL) or a CDN?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/services/public-static-website/core-concepts.md#how-to-configure-https-ssl-or-a-cdn)
+* [How to configure HTTPS (SSL) or a CDN?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/services/public-static-website/core-concepts.md#how-to-configure-https-ssl-or-a-cdn)
* [How to handle www + root domains](https://github.com/gruntwork-io/terraform-aws-static-assets/blob/master/modules/s3-static-website/core-concepts.md#how-do-i-handle-www—root-domains)
* [How do I configure Cross Origin Resource Sharing (CORS)?](https://github.com/gruntwork-io/terraform-aws-static-assets/blob/master/modules/s3-static-website/core-concepts.md#how-do-i-configure-cross-origin-resource-sharing-cors)
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -89,7 +89,7 @@ modules.
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -97,7 +97,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing/services/public-static-website/example-website):
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing/services/public-static-website/example-website):
The `examples/for-production` folder contains sample code optimized for direct usage in production. This is code from
the [Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -116,7 +116,7 @@ If you want to deploy this repo in production, check out the following resources
module "public_static_website" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/public-static-website?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/public-static-website?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -134,15 +134,15 @@ module "public_static_website" {
# ----------------------------------------------------------------------------------------------------
# The domain name associated with a hosted zone in Route 53. Usually the base
- # domain name of var.website_domain_name (e.g. foo.com). This is used to find the
- # hosted zone that will be used for the CloudFront distribution. If
+ # domain name of var.website_domain_name (e.g. foo.com). This is used to find
+ # the hosted zone that will be used for the CloudFront distribution. If
# var.create_route53_entry is true, one of var.base_domain_name or
# var.hosted_zone_id must be provided.
base_domain_name = null
# The tags associated with var.base_domain_name. If there are multiple hosted
- # zones for the same var.base_domain_name, this will help filter the hosted zones
- # so that the correct hosted zone is found.
+ # zones for the same var.base_domain_name, this will help filter the hosted
+ # zones so that the correct hosted zone is found.
base_domain_name_tags = {}
# A configuration for CORS on the S3 bucket. Default value comes from AWS. Can
@@ -151,13 +151,14 @@ module "public_static_website" {
# https://www.terraform.io/docs/providers/aws/r/s3_bucket.html#using-cors.
cors_rule = []
- # If set to true, create a DNS A Record in Route 53. If var.create_route53_entry
- # is true, one of var.base_domain_name or var.hosted_zone_id must be provided.
+ # If set to true, create a DNS A Record in Route 53. If
+ # var.create_route53_entry is true, one of var.base_domain_name or
+ # var.hosted_zone_id must be provided.
create_route53_entry = true
- # A map of custom tags to apply to the S3 bucket containing the website and the
- # CloudFront distribution created for it. The key is the tag name and the value is
- # the tag value.
+ # A map of custom tags to apply to the S3 bucket containing the website and
+ # the CloudFront distribution created for it. The key is the tag name and the
+ # value is the tag value.
custom_tags = {}
# A list of existing CloudFront functions to associate with the default cached
@@ -165,31 +166,31 @@ module "public_static_website" {
# high-scale, latency sensitive CDN customizations.
default_function_associations = []
- # A list of existing Lambda@Edge functions to associate with CloudFront. Lambda
- # version must be a published version and cannot be `$LATEST` (See
- # https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#lambd
- # _function_association for available options).
+ # A list of existing Lambda@Edge functions to associate with CloudFront.
+ # Lambda version must be a published version and cannot be `$LATEST` (See
+ # https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#lambda_function_association
+ # for available options).
default_lambda_associations = []
- # The default amount of time, in seconds, that an object is in a CloudFront cache
- # before CloudFront forwards another request in the absence of an 'Cache-Control
- # max-age' or 'Expires' header.
+ # The default amount of time, in seconds, that an object is in a CloudFront
+ # cache before CloudFront forwards another request in the absence of an
+ # 'Cache-Control max-age' or 'Expires' header.
default_ttl = 30
# Option to disable cloudfront log delivery to s3. This is required in regions
# where cloudfront cannot deliver logs to s3, see
- # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.ht
- # l#access-logs-choosing-s3-bucket
+ # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html#access-logs-choosing-s3-bucket
disable_cloudfront_logging = false
# If set to true, a CloudFront function to implement default directory index
- # (looking up index.html in an S3 directory when path ends in /) is deployed. Only
- # relevant when var.restrict_bucket_access_to_cloudfront is set to true.
+ # (looking up index.html in an S3 directory when path ends in /) is deployed.
+ # Only relevant when var.restrict_bucket_access_to_cloudfront is set to true.
enable_default_directory_index_function = false
# Set to true to enable versioning. This means the bucket will retain all old
- # versions of all files. This is useful for backup purposes (e.g. you can rollback
- # to an older version), but it may mean your bucket uses more storage.
+ # versions of all files. This is useful for backup purposes (e.g. you can
+ # rollback to an older version), but it may mean your bucket uses more
+ # storage.
enable_versioning = true
# The path to the error document in the S3 bucket (e.g. error.html).
@@ -200,17 +201,17 @@ module "public_static_website" {
# If set to true, this will force the deletion of the website, redirect, and
# access log S3 buckets when you run terraform destroy, even if there is still
- # content in those buckets. This is only meant for testing and should not be used
- # in production.
+ # content in those buckets. This is only meant for testing and should not be
+ # used in production.
force_destroy = false
- # The headers you want CloudFront to forward to the origin. Set to * to forward
- # all headers.
+ # The headers you want CloudFront to forward to the origin. Set to * to
+ # forward all headers.
forward_headers = []
- # The ISO 3166-1-alpha-2 codes for which you want CloudFront either to distribute
- # your content (if var.geo_restriction_type is whitelist) or not distribute your
- # content (if var.geo_restriction_type is blacklist).
+ # The ISO 3166-1-alpha-2 codes for which you want CloudFront either to
+ # distribute your content (if var.geo_restriction_type is whitelist) or not
+ # distribute your content (if var.geo_restriction_type is blacklist).
geo_locations_list = []
# The method that you want to use to restrict distribution of your content by
@@ -218,37 +219,39 @@ module "public_static_website" {
geo_restriction_type = "none"
# The ID of the Route 53 Hosted Zone in which to create the DNS A Records
- # specified in var.website_domain_name. If var.create_route53_entry is true, one
- # of var.base_domain_name or var.hosted_zone_id must be provided.
+ # specified in var.website_domain_name. If var.create_route53_entry is true,
+ # one of var.base_domain_name or var.hosted_zone_id must be provided.
hosted_zone_id = null
# The path to the index document in the S3 bucket (e.g. index.html).
index_document = "index.html"
- # The maximum amount of time, in seconds, that an object is in a CloudFront cache
- # before CloudFront forwards another request to your origin to determine whether
- # the object has been updated. Only effective in the presence of 'Cache-Control
- # max-age', 'Cache-Control s-maxage', and 'Expires' headers.
+ # The maximum amount of time, in seconds, that an object is in a CloudFront
+ # cache before CloudFront forwards another request to your origin to determine
+ # whether the object has been updated. Only effective in the presence of
+ # 'Cache-Control max-age', 'Cache-Control s-maxage', and 'Expires' headers.
max_ttl = 60
- # The minimum amount of time that you want objects to stay in CloudFront caches
- # before CloudFront queries your origin to see whether the object has been
- # updated.
+ # The minimum amount of time that you want objects to stay in CloudFront
+ # caches before CloudFront queries your origin to see whether the object has
+ # been updated.
min_ttl = 0
# The minimum version of the SSL protocol that you want CloudFront to use for
# HTTPS connections. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # front_distribution#minimum_protocol_version for possible values.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#minimum_protocol_version
+ # for possible values.
minimum_protocol_version = "TLSv1"
- # If set to true, the S3 bucket will only be accessible via CloudFront, and not
- # directly. NOTE: this is only known to work if the S3 Bucket is in us-east-1.
+ # If set to true, the S3 bucket will only be accessible via CloudFront, and
+ # not directly. NOTE: this is only known to work if the S3 Bucket is in
+ # us-east-1.
restrict_bucket_access_to_cloudfront = false
- # A map describing the routing_rule for the aws_s3_website_configuration resource.
- # Describes redirect behavior and conditions when redirects are applied. Conflicts
- # with routing_rules. Use routing_rules if rules contain empty String values.
+ # A map describing the routing_rule for the aws_s3_website_configuration
+ # resource. Describes redirect behavior and conditions when redirects are
+ # applied. Conflicts with routing_rules. Use routing_rules if rules contain
+ # empty String values.
routing_rule = {}
# A json string array containing routing rules for the
@@ -257,53 +260,54 @@ module "public_static_website" {
# when routing rules contain empty String values.
routing_rules = null
- # By default, the s3 bucket hosting the website is named after the domain name.
- # Use this configuration to override it with this value instead.
+ # By default, the s3 bucket hosting the website is named after the domain
+ # name. Use this configuration to override it with this value instead.
s3_bucket_override_bucket_name = null
- # The policy directives and their values that CloudFront includes as values for
- # the Content-Security-Policy HTTP response header. When null, the header is
- # omitted.
+ # The policy directives and their values that CloudFront includes as values
+ # for the Content-Security-Policy HTTP response header. When null, the header
+ # is omitted.
security_header_content_security_policy = "default-src 'self'; base-uri 'self'; block-all-mixed-content; font-src 'self' https: data:; form-action 'self'; frame-ancestors 'self'; img-src 'self' data:; object-src 'none'; script-src 'self'; script-src-attr 'none'; style-src 'self' https: 'unsafe-inline'; upgrade-insecure-requests"
- # Determines whether CloudFront includes the X-Content-Type-Options HTTP response
- # header with its value set to nosniff.
+ # Determines whether CloudFront includes the X-Content-Type-Options HTTP
+ # response header with its value set to nosniff.
security_header_enable_nosniff_content_type_options = true
- # Determines whether CloudFront includes the X-Frame-Options HTTP response header
- # and the header’s value. When null, the header is omitted.
+ # Determines whether CloudFront includes the X-Frame-Options HTTP response
+ # header and the header’s value. When null, the header is omitted.
security_header_frame_option = "SAMEORIGIN"
# Determines whether CloudFront includes the Strict-Transport-Security HTTP
# response header and the header’s value. When null, the header is omitted.
security_header_hsts = {"include_subdomains":true,"max_age":15552000,"preload":false}
- # Determines whether CloudFront includes the Referrer-Policy HTTP response header
- # and the header’s value. When null, the header is omitted.
+ # Determines whether CloudFront includes the Referrer-Policy HTTP response
+ # header and the header’s value. When null, the header is omitted.
security_header_referrer_policy = "no-referrer"
- # Determine whether CloudFront includes the X-Xss-Protection HTTP response header
- # and the header’s value. When null, the header is omitted.
+ # Determine whether CloudFront includes the X-Xss-Protection HTTP response
+ # header and the header’s value. When null, the header is omitted.
security_header_xss_protection = {"mode_block":false,"protection":false,"report_uri":null}
- # In older AWS accounts, you must set this variable to true to use the ARN of the
- # CloudFront log delivery AWS account in the access log bucket policy. In newer
- # AWS accounts, you must set this variable to false to use the CanonicalUser ID of
- # the CloudFront log delivery account. If you pick the wrong value, you'll get a
- # perpetual diff on the IAM policy. See
- # https://github.com/terraform-providers/terraform-provider-aws/issues/10158 for
- # context.
+ # In older AWS accounts, you must set this variable to true to use the ARN of
+ # the CloudFront log delivery AWS account in the access log bucket policy. In
+ # newer AWS accounts, you must set this variable to false to use the
+ # CanonicalUser ID of the CloudFront log delivery account. If you pick the
+ # wrong value, you'll get a perpetual diff on the IAM policy. See
+ # https://github.com/terraform-providers/terraform-provider-aws/issues/10158
+ # for context.
use_cloudfront_arn_for_bucket_policy = false
- # Use this element to specify the protocol that users can use to access the files
- # in the origin specified by TargetOriginId when a request matches the path
- # pattern in PathPattern. One of allow-all, https-only, or redirect-to-https.
+ # Use this element to specify the protocol that users can use to access the
+ # files in the origin specified by TargetOriginId when a request matches the
+ # path pattern in PathPattern. One of allow-all, https-only, or
+ # redirect-to-https.
viewer_protocol_policy = "allow-all"
- # If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web
- # ACL that is associated with the distribution. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # front_distribution#web_acl_id for more details.
+ # If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF
+ # web ACL that is associated with the distribution. Refer to
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#web_acl_id
+ # for more details.
web_acl_id = null
}
@@ -321,7 +325,7 @@ module "public_static_website" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/public-static-website?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/public-static-website?ref=v0.104.12"
}
inputs = {
@@ -342,15 +346,15 @@ inputs = {
# ----------------------------------------------------------------------------------------------------
# The domain name associated with a hosted zone in Route 53. Usually the base
- # domain name of var.website_domain_name (e.g. foo.com). This is used to find the
- # hosted zone that will be used for the CloudFront distribution. If
+ # domain name of var.website_domain_name (e.g. foo.com). This is used to find
+ # the hosted zone that will be used for the CloudFront distribution. If
# var.create_route53_entry is true, one of var.base_domain_name or
# var.hosted_zone_id must be provided.
base_domain_name = null
# The tags associated with var.base_domain_name. If there are multiple hosted
- # zones for the same var.base_domain_name, this will help filter the hosted zones
- # so that the correct hosted zone is found.
+ # zones for the same var.base_domain_name, this will help filter the hosted
+ # zones so that the correct hosted zone is found.
base_domain_name_tags = {}
# A configuration for CORS on the S3 bucket. Default value comes from AWS. Can
@@ -359,13 +363,14 @@ inputs = {
# https://www.terraform.io/docs/providers/aws/r/s3_bucket.html#using-cors.
cors_rule = []
- # If set to true, create a DNS A Record in Route 53. If var.create_route53_entry
- # is true, one of var.base_domain_name or var.hosted_zone_id must be provided.
+ # If set to true, create a DNS A Record in Route 53. If
+ # var.create_route53_entry is true, one of var.base_domain_name or
+ # var.hosted_zone_id must be provided.
create_route53_entry = true
- # A map of custom tags to apply to the S3 bucket containing the website and the
- # CloudFront distribution created for it. The key is the tag name and the value is
- # the tag value.
+ # A map of custom tags to apply to the S3 bucket containing the website and
+ # the CloudFront distribution created for it. The key is the tag name and the
+ # value is the tag value.
custom_tags = {}
# A list of existing CloudFront functions to associate with the default cached
@@ -373,31 +378,31 @@ inputs = {
# high-scale, latency sensitive CDN customizations.
default_function_associations = []
- # A list of existing Lambda@Edge functions to associate with CloudFront. Lambda
- # version must be a published version and cannot be `$LATEST` (See
- # https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#lambd
- # _function_association for available options).
+ # A list of existing Lambda@Edge functions to associate with CloudFront.
+ # Lambda version must be a published version and cannot be `$LATEST` (See
+ # https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#lambda_function_association
+ # for available options).
default_lambda_associations = []
- # The default amount of time, in seconds, that an object is in a CloudFront cache
- # before CloudFront forwards another request in the absence of an 'Cache-Control
- # max-age' or 'Expires' header.
+ # The default amount of time, in seconds, that an object is in a CloudFront
+ # cache before CloudFront forwards another request in the absence of a
+ # 'Cache-Control max-age' or 'Expires' header.
default_ttl = 30
# Option to disable cloudfront log delivery to s3. This is required in regions
# where cloudfront cannot deliver logs to s3, see
- # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.ht
- # l#access-logs-choosing-s3-bucket
+ # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html#access-logs-choosing-s3-bucket
disable_cloudfront_logging = false
# If set to true, a CloudFront function to implement default directory index
- # (looking up index.html in an S3 directory when path ends in /) is deployed. Only
- # relevant when var.restrict_bucket_access_to_cloudfront is set to true.
+ # (looking up index.html in an S3 directory when path ends in /) is deployed.
+ # Only relevant when var.restrict_bucket_access_to_cloudfront is set to true.
enable_default_directory_index_function = false
# Set to true to enable versioning. This means the bucket will retain all old
- # versions of all files. This is useful for backup purposes (e.g. you can rollback
- # to an older version), but it may mean your bucket uses more storage.
+ # versions of all files. This is useful for backup purposes (e.g. you can
+ # rollback to an older version), but it may mean your bucket uses more
+ # storage.
enable_versioning = true
# The path to the error document in the S3 bucket (e.g. error.html).
@@ -408,17 +413,17 @@ inputs = {
# If set to true, this will force the deletion of the website, redirect, and
# access log S3 buckets when you run terraform destroy, even if there is still
- # content in those buckets. This is only meant for testing and should not be used
- # in production.
+ # content in those buckets. This is only meant for testing and should not be
+ # used in production.
force_destroy = false
- # The headers you want CloudFront to forward to the origin. Set to * to forward
- # all headers.
+ # The headers you want CloudFront to forward to the origin. Set to * to
+ # forward all headers.
forward_headers = []
- # The ISO 3166-1-alpha-2 codes for which you want CloudFront either to distribute
- # your content (if var.geo_restriction_type is whitelist) or not distribute your
- # content (if var.geo_restriction_type is blacklist).
+ # The ISO 3166-1-alpha-2 codes for which you want CloudFront either to
+ # distribute your content (if var.geo_restriction_type is whitelist) or not
+ # distribute your content (if var.geo_restriction_type is blacklist).
geo_locations_list = []
# The method that you want to use to restrict distribution of your content by
@@ -426,37 +431,39 @@ inputs = {
geo_restriction_type = "none"
# The ID of the Route 53 Hosted Zone in which to create the DNS A Records
- # specified in var.website_domain_name. If var.create_route53_entry is true, one
- # of var.base_domain_name or var.hosted_zone_id must be provided.
+ # specified in var.website_domain_name. If var.create_route53_entry is true,
+ # one of var.base_domain_name or var.hosted_zone_id must be provided.
hosted_zone_id = null
# The path to the index document in the S3 bucket (e.g. index.html).
index_document = "index.html"
- # The maximum amount of time, in seconds, that an object is in a CloudFront cache
- # before CloudFront forwards another request to your origin to determine whether
- # the object has been updated. Only effective in the presence of 'Cache-Control
- # max-age', 'Cache-Control s-maxage', and 'Expires' headers.
+ # The maximum amount of time, in seconds, that an object is in a CloudFront
+ # cache before CloudFront forwards another request to your origin to determine
+ # whether the object has been updated. Only effective in the presence of
+ # 'Cache-Control max-age', 'Cache-Control s-maxage', and 'Expires' headers.
max_ttl = 60
- # The minimum amount of time that you want objects to stay in CloudFront caches
- # before CloudFront queries your origin to see whether the object has been
- # updated.
+ # The minimum amount of time that you want objects to stay in CloudFront
+ # caches before CloudFront queries your origin to see whether the object has
+ # been updated.
min_ttl = 0
# The minimum version of the SSL protocol that you want CloudFront to use for
# HTTPS connections. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # front_distribution#minimum_protocol_version for possible values.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#minimum_protocol_version
+ # for possible values.
minimum_protocol_version = "TLSv1"
- # If set to true, the S3 bucket will only be accessible via CloudFront, and not
- # directly. NOTE: this is only known to work if the S3 Bucket is in us-east-1.
+ # If set to true, the S3 bucket will only be accessible via CloudFront, and
+ # not directly. NOTE: this is only known to work if the S3 Bucket is in
+ # us-east-1.
restrict_bucket_access_to_cloudfront = false
- # A map describing the routing_rule for the aws_s3_website_configuration resource.
- # Describes redirect behavior and conditions when redirects are applied. Conflicts
- # with routing_rules. Use routing_rules if rules contain empty String values.
+ # A map describing the routing_rule for the aws_s3_website_configuration
+ # resource. Describes redirect behavior and conditions when redirects are
+ # applied. Conflicts with routing_rules. Use routing_rules if rules contain
+ # empty String values.
routing_rule = {}
# A json string array containing routing rules for the
@@ -465,53 +472,54 @@ inputs = {
# when routing rules contain empty String values.
routing_rules = null
- # By default, the s3 bucket hosting the website is named after the domain name.
- # Use this configuration to override it with this value instead.
+ # By default, the s3 bucket hosting the website is named after the domain
+ # name. Use this configuration to override it with this value instead.
s3_bucket_override_bucket_name = null
- # The policy directives and their values that CloudFront includes as values for
- # the Content-Security-Policy HTTP response header. When null, the header is
- # omitted.
+ # The policy directives and their values that CloudFront includes as values
+ # for the Content-Security-Policy HTTP response header. When null, the header
+ # is omitted.
security_header_content_security_policy = "default-src 'self'; base-uri 'self'; block-all-mixed-content; font-src 'self' https: data:; form-action 'self'; frame-ancestors 'self'; img-src 'self' data:; object-src 'none'; script-src 'self'; script-src-attr 'none'; style-src 'self' https: 'unsafe-inline'; upgrade-insecure-requests"
- # Determines whether CloudFront includes the X-Content-Type-Options HTTP response
- # header with its value set to nosniff.
+ # Determines whether CloudFront includes the X-Content-Type-Options HTTP
+ # response header with its value set to nosniff.
security_header_enable_nosniff_content_type_options = true
- # Determines whether CloudFront includes the X-Frame-Options HTTP response header
- # and the header’s value. When null, the header is omitted.
+ # Determines whether CloudFront includes the X-Frame-Options HTTP response
+ # header and the header’s value. When null, the header is omitted.
security_header_frame_option = "SAMEORIGIN"
# Determines whether CloudFront includes the Strict-Transport-Security HTTP
# response header and the header’s value. When null, the header is omitted.
security_header_hsts = {"include_subdomains":true,"max_age":15552000,"preload":false}
- # Determines whether CloudFront includes the Referrer-Policy HTTP response header
- # and the header’s value. When null, the header is omitted.
+ # Determines whether CloudFront includes the Referrer-Policy HTTP response
+ # header and the header’s value. When null, the header is omitted.
security_header_referrer_policy = "no-referrer"
- # Determine whether CloudFront includes the X-Xss-Protection HTTP response header
- # and the header’s value. When null, the header is omitted.
+ # Determine whether CloudFront includes the X-Xss-Protection HTTP response
+ # header and the header’s value. When null, the header is omitted.
security_header_xss_protection = {"mode_block":false,"protection":false,"report_uri":null}
- # In older AWS accounts, you must set this variable to true to use the ARN of the
- # CloudFront log delivery AWS account in the access log bucket policy. In newer
- # AWS accounts, you must set this variable to false to use the CanonicalUser ID of
- # the CloudFront log delivery account. If you pick the wrong value, you'll get a
- # perpetual diff on the IAM policy. See
- # https://github.com/terraform-providers/terraform-provider-aws/issues/10158 for
- # context.
+ # In older AWS accounts, you must set this variable to true to use the ARN of
+ # the CloudFront log delivery AWS account in the access log bucket policy. In
+ # newer AWS accounts, you must set this variable to false to use the
+ # CanonicalUser ID of the CloudFront log delivery account. If you pick the
+ # wrong value, you'll get a perpetual diff on the IAM policy. See
+ # https://github.com/terraform-providers/terraform-provider-aws/issues/10158
+ # for context.
use_cloudfront_arn_for_bucket_policy = false
- # Use this element to specify the protocol that users can use to access the files
- # in the origin specified by TargetOriginId when a request matches the path
- # pattern in PathPattern. One of allow-all, https-only, or redirect-to-https.
+ # Use this element to specify the protocol that users can use to access the
+ # files in the origin specified by TargetOriginId when a request matches the
+ # path pattern in PathPattern. One of allow-all, https-only, or
+ # redirect-to-https.
viewer_protocol_policy = "allow-all"
- # If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web
- # ACL that is associated with the distribution. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # front_distribution#web_acl_id for more details.
+ # If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF
+ # web ACL that is associated with the distribution. Refer to
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#web_acl_id
+ # for more details.
web_acl_id = null
}
@@ -1051,11 +1059,11 @@ The ARN of the created S3 bucket associated with the website.
diff --git a/docs/reference/services/ci-cd-pipeline/ecs-deploy-runner.md b/docs/reference/services/ci-cd-pipeline/ecs-deploy-runner.md
index 3de166353b..9983622c0a 100644
--- a/docs/reference/services/ci-cd-pipeline/ecs-deploy-runner.md
+++ b/docs/reference/services/ci-cd-pipeline/ecs-deploy-runner.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# ECS Deploy Runner
-View Source
+View Source
Release Notes
@@ -77,7 +77,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -85,7 +85,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [shared account ecs-deploy-runner configuration in the for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production/infrastructure-live/shared/us-west-2/mgmt/ecs-deploy-runner/):
+* [shared account ecs-deploy-runner configuration in the for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production/infrastructure-live/shared/us-west-2/mgmt/ecs-deploy-runner/):
The `examples/for-production` folder contains sample code optimized for direct usage in production. This is code from
the [Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -104,15 +104,15 @@ If you want to deploy this repo in production, check out the following resources
module "ecs_deploy_runner" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-deploy-runner?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecs-deploy-runner?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
# Configuration options for the ami-builder container of the ECS deploy runner
- # stack. This container will be used for building AMIs in the CI/CD pipeline using
- # packer. Set to `null` to disable this container.
+ # stack. This container will be used for building AMIs in the CI/CD pipeline
+ # using packer. Set to `null` to disable this container.
ami_builder_config =
- # Configuration options for the docker-image-builder container of the ECS deploy
- # runner stack. This container will be used for building docker images in the
- # CI/CD pipeline. Set to `null` to disable this container.
+ # Configuration options for the docker-image-builder container of the ECS
+ # deploy runner stack. This container will be used for building docker images
+ # in the CI/CD pipeline. Set to `null` to disable this container.
docker_image_builder_config =
# Configuration options for the terraform-applier container of the ECS deploy
- # runner stack. This container will be used for running infrastructure deployment
- # actions (including automated variable updates) in the CI/CD pipeline with
- # Terraform / Terragrunt. Set to `null` to disable this container.
+ # runner stack. This container will be used for running infrastructure
+ # deployment actions (including automated variable updates) in the CI/CD
+ # pipeline with Terraform / Terragrunt. Set to `null` to disable this
+ # container.
terraform_applier_config =
- # Configuration options for the docker-image-builder container of the ECS deploy
- # runner stack. This container will be used for building docker images in the
- # CI/CD pipeline. Set to `null` to disable this container.
+ # Configuration options for the docker-image-builder container of the ECS
+ # deploy runner stack. This container will be used for building docker images
+ # in the CI/CD pipeline. Set to `null` to disable this container.
docker_image_builder_config =
# Configuration options for the terraform-applier container of the ECS deploy
- # runner stack. This container will be used for running infrastructure deployment
- # actions (including automated variable updates) in the CI/CD pipeline with
- # Terraform / Terragrunt. Set to `null` to disable this container.
+ # runner stack. This container will be used for running infrastructure
+ # deployment actions (including automated variable updates) in the CI/CD
+ # pipeline with Terraform / Terragrunt. Set to `null` to disable this
+ # container.
terraform_applier_config =
diff --git a/docs/reference/services/ci-cd-pipeline/jenkins.md b/docs/reference/services/ci-cd-pipeline/jenkins.md
index 89895e19db..bc688981be 100644
--- a/docs/reference/services/ci-cd-pipeline/jenkins.md
+++ b/docs/reference/services/ci-cd-pipeline/jenkins.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Jenkins CI Server
-View Source
+View Source
Release Notes
@@ -68,7 +68,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -76,7 +76,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -98,7 +98,7 @@ If you want to deploy this repo in production, check out the following resources
module "jenkins" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/jenkins?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/jenkins?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -108,19 +108,19 @@ module "jenkins" {
# Manager (ACM).
acm_ssl_certificate_domain =
- # The IDs of the subnets in which to deploy the ALB that runs in front of Jenkins.
- # Must be subnets in var.vpc_id.
+ # The IDs of the subnets in which to deploy the ALB that runs in front of
+ # Jenkins. Must be subnets in var.vpc_id.
alb_subnet_ids =
# The ID of the AMI to run on the Jenkins server. This should be the AMI build
- # from the Packer template jenkins-ubuntu.json. One of var.ami or var.ami_filters
- # is required. Set to null if looking up the ami with filters.
+ # from the Packer template jenkins-ubuntu.json. One of var.ami or
+ # var.ami_filters is required. Set to null if looking up the ami with filters.
ami =
# Properties on the AMI that can be used to lookup a prebuilt AMI for use with
- # Jenkins. You can build the AMI using the Packer template jenkins-ubuntu.json.
- # Only used if var.ami is null. One of var.ami or var.ami_filters is required. Set
- # to null if passing the ami ID directly.
+ # Jenkins. You can build the AMI using the Packer template
+ # jenkins-ubuntu.json. Only used if var.ami is null. One of var.ami or
+ # var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
- # The domain name for the DNS A record to add for Jenkins (e.g. jenkins.foo.com).
- # Must be in the domain managed by var.hosted_zone_id.
+ # The domain name for the DNS A record to add for Jenkins (e.g.
+ # jenkins.foo.com). Must be in the domain managed by var.hosted_zone_id.
domain_name =
# The ID of the Route 53 Hosted Zone in which to create a DNS A record for
@@ -140,11 +140,12 @@ module "jenkins" {
# The instance type to use for the Jenkins server (e.g. t2.medium)
instance_type =
- # The ID of the subnet in which to deploy Jenkins. Must be a subnet in var.vpc_id.
+ # The ID of the subnet in which to deploy Jenkins. Must be a subnet in
+ # var.vpc_id.
jenkins_subnet_id =
- # The amount of memory to give Jenkins (e.g., 1g or 512m). Used for the -Xms and
- # -Xmx settings.
+ # The amount of memory to give Jenkins (e.g., 1g or 512m). Used for the -Xms
+ # and -Xmx settings.
memory =
# The ID of the VPC in which to deploy Jenkins
@@ -154,40 +155,41 @@ module "jenkins" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications. Also used for the alarms if the Jenkins
- # backup job fails.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications. Also used for the alarms if the
+ # Jenkins backup job fails.
alarms_sns_topic_arn = []
- # The IP address ranges in CIDR format from which to allow incoming HTTP requests
- # to Jenkins.
+ # The IP address ranges in CIDR format from which to allow incoming HTTP
+ # requests to Jenkins.
allow_incoming_http_from_cidr_blocks = []
# The IDs of security groups from which to allow incoming HTTP requests to
# Jenkins.
allow_incoming_http_from_security_group_ids = []
- # The IP address ranges in CIDR format from which to allow incoming SSH requests
- # to Jenkins.
+ # The IP address ranges in CIDR format from which to allow incoming SSH
+ # requests to Jenkins.
allow_ssh_from_cidr_blocks = []
- # The IDs of security groups from which to allow incoming SSH requests to Jenkins.
+ # The IDs of security groups from which to allow incoming SSH requests to
+ # Jenkins.
allow_ssh_from_security_group_ids = []
- # How often, in seconds, the backup job is expected to run. This is the same as
- # var.backup_job_schedule_expression, but unfortunately, Terraform offers no way
- # to convert rate expressions to seconds. We add a CloudWatch alarm that triggers
- # if the value of var.backup_job_metric_name and var.backup_job_metric_namespace
- # isn't updated within this time period, as that indicates the backup failed to
- # run.
+ # How often, in seconds, the backup job is expected to run. This is the same
+ # as var.backup_job_schedule_expression, but unfortunately, Terraform offers
+ # no way to convert rate expressions to seconds. We add a CloudWatch alarm
+ # that triggers if the value of var.backup_job_metric_name and
+ # var.backup_job_metric_namespace isn't updated within this time period, as
+ # that indicates the backup failed to run.
backup_job_alarm_period = 86400
# The name for the CloudWatch Metric the AWS lambda backup job will increment
# every time the job completes successfully.
backup_job_metric_name = "jenkins-backup-job"
- # The namespace for the CloudWatch Metric the AWS lambda backup job will increment
- # every time the job completes successfully.
+ # The namespace for the CloudWatch Metric the AWS lambda backup job will
+ # increment every time the job completes successfully.
backup_job_metric_namespace = "Custom/Jenkins"
# A cron or rate expression that specifies how often to take a snapshot of the
@@ -204,13 +206,13 @@ module "jenkins" {
backup_using_lambda = false
# The list of IAM actions this Jenkins server should be allowed to do: e.g.,
- # ec2:*, s3:*, etc. This should be the list of IAM permissions Jenkins needs in
- # this AWS account to run builds. These permissions will be added to the server's
- # IAM role for all resources ('*').
+ # ec2:*, s3:*, etc. This should be the list of IAM permissions Jenkins needs
+ # in this AWS account to run builds. These permissions will be added to the
+ # server's IAM role for all resources ('*').
build_permission_actions = []
- # Cloud init scripts to run on the Jenkins server when it is booting. See the part
- # blocks in
+ # Cloud init scripts to run on the Jenkins server when it is booting. See the
+ # part blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
# syntax.
cloud_init_parts = {}
@@ -220,13 +222,12 @@ module "jenkins" {
cloudwatch_log_group_kms_key_id = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values.
cloudwatch_log_group_tags = null
# Set to true to create a public DNS A record in Route53 for Jenkins.
@@ -235,8 +236,9 @@ module "jenkins" {
# A list of custom tags to apply to Jenkins and all other resources.
custom_tags = {}
- # The default OS user for the Jenkins AMI. For AWS Ubuntu AMIs, which is what the
- # Packer template in jenkins-ubunutu.json uses, the default OS user is 'ubuntu'.
+ # The default OS user for the Jenkins AMI. For AWS Ubuntu AMIs, which is what
+ # the Packer template in jenkins-ubuntu.json uses, the default OS user is
+ # 'ubuntu'.
default_user = "ubuntu"
# How often this lifecycle policy should be evaluated, in hours.
@@ -252,63 +254,63 @@ module "jenkins" {
# should be evaluated. Max of 1.
dlm_backup_job_schedule_times = ["03:00"]
- # The ARN of the KMS key used for encrypting the Jenkins EBS volume. The module
- # will grant Jenkins permission to use this key.
+ # The ARN of the KMS key used for encrypting the Jenkins EBS volume. The
+ # module will grant Jenkins permission to use this key.
ebs_kms_key_arn = null
- # Whether or not the provide EBS KMS key ARN is a key alias. If providing the key
- # ID, leave this set to false.
+ # Whether or not the provided EBS KMS key ARN is a key alias. If providing the
+ # key ID, leave this set to false.
ebs_kms_key_arn_is_alias = false
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
- # Set to true to add AIM permissions to send logs to CloudWatch. This is useful in
- # combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch.
+ # Set to true to add IAM permissions to send logs to CloudWatch. This is
+ # useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch.
enable_cloudwatch_log_aggregation = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your
- # Jenkins server.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your Jenkins server.
enable_cloudwatch_metrics = true
- # Enable ip-lockdown to block access to the instance metadata. Defaults to true.
+ # Enable ip-lockdown to block access to the instance metadata. Defaults to
+ # true.
enable_ip_lockdown = true
# Set to true to add IAM permissions for ssh-grunt
- # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-
- # runt), which will allow you to manage SSH access via IAM groups.
+ # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-grunt),
+ # which will allow you to manage SSH access via IAM groups.
enable_ssh_grunt = true
# A list of IAM role ARNs in other AWS accounts that Jenkins will be able to
# assume to do automated deployment in those accounts.
external_account_auto_deploy_iam_role_arns = []
- # If you are using ssh-grunt and your IAM users / groups are defined in a separate
- # AWS account, you can use this variable to specify the ARN of an IAM role that
- # ssh-grunt can assume to retrieve IAM group and public SSH key info from that
- # account. To omit this variable, set it to an empty string (do NOT use null, or
- # Terraform will complain).
+ # If you are using ssh-grunt and your IAM users / groups are defined in a
+ # separate AWS account, you can use this variable to specify the ARN of an IAM
+ # role that ssh-grunt can assume to retrieve IAM group and public SSH key info
+ # from that account. To omit this variable, set it to an empty string (do NOT
+ # use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_asg_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_asg_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -319,29 +321,29 @@ module "jenkins" {
# percentage above this threshold.
high_asg_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_asg_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_asg_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_memory_utilization_treat_missing_data = "missing"
- # Set to true to make the Jenkins ALB an internal ALB that cannot be accessed from
- # the public Internet. We strongly recommend setting this to true to keep Jenkins
- # more secure.
+ # Set to true to make the Jenkins ALB an internal ALB that cannot be accessed
+ # from the public Internet. We strongly recommend setting this to true to keep
+ # Jenkins more secure.
is_internal_alb = true
# The OS device name where the Jenkins EBS volume should be attached
@@ -355,9 +357,8 @@ module "jenkins" {
# Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
# state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
jenkins_volume_alarm_treat_missing_data = "missing"
# Set to true to encrypt the Jenkins EBS volume.
@@ -367,8 +368,8 @@ module "jenkins" {
# Jenkins server.
jenkins_volume_size = 200
- # The type of volume to use for the EBS volume used by the Jenkins server. Must be
- # one of: standard, gp2, io1, sc1, or st1.
+ # The type of volume to use for the EBS volume used by the Jenkins server.
+ # Must be one of: standard, gp2, io1, sc1, or st1.
jenkins_volume_type = "gp2"
# The name of a Key Pair that can be used to SSH to the Jenkins server. Leave
@@ -382,42 +383,43 @@ module "jenkins" {
# standard, gp2, io1, sc1, or st1.
root_block_device_volume_type = "gp2"
- # The amount of disk space, in GB, to allocate for the root volume of this server.
- # Note that all of Jenkins' data is stored on a separate EBS Volume (see
- # var.jenkins_volume_size), so this root volume is primarily used for the OS, temp
- # folders, apps, etc.
+ # The amount of disk space, in GB, to allocate for the root volume of this
+ # server. Note that all of Jenkins' data is stored on a separate EBS Volume
+ # (see var.jenkins_volume_size), so this root volume is primarily used for the
+ # OS, temp folders, apps, etc.
root_volume_size = 100
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the EC2 instances. This is useful if you wish to customize the CloudWatch Log
- # Group with various settings such as retention periods and KMS encryption. When
- # false, the CloudWatch agent will automatically create a basic log group to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the EC2 instances. This is useful if you wish to customize the
+ # CloudWatch Log Group with various settings such as retention periods and KMS
+ # encryption. When false, the CloudWatch agent will automatically create a
+ # basic log group to use.
should_create_cloudwatch_log_group = true
- # If set to true, skip the health check, and start a rolling deployment of Jenkins
- # without waiting for it to initially be in a healthy state. This is primarily
- # useful if the server group is in a broken state and you want to force a
- # deployment anyway.
+ # If set to true, skip the health check, and start a rolling deployment of
+ # Jenkins without waiting for it to initially be in a healthy state. This is
+ # primarily useful if the server group is in a broken state and you want to
+ # force a deployment anyway.
skip_health_check = false
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this Jenkins server. This value is only used if
- # enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this Jenkins server. This value is only used
+ # if enable_ssh_grunt=true.
ssh_grunt_iam_group = "ssh-grunt-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this Jenkins server with sudo permissions. This value
- # is only used if enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this Jenkins server with sudo permissions.
+ # This value is only used if enable_ssh_grunt=true.
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
# The tenancy of this server. Must be one of: default, dedicated, or host.
tenancy = "default"
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
}
@@ -435,7 +437,7 @@ module "jenkins" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/jenkins?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/jenkins?ref=v0.104.12"
}
inputs = {
@@ -448,19 +450,19 @@ inputs = {
# Manager (ACM).
acm_ssl_certificate_domain =
- # The IDs of the subnets in which to deploy the ALB that runs in front of Jenkins.
- # Must be subnets in var.vpc_id.
+ # The IDs of the subnets in which to deploy the ALB that runs in front of
+ # Jenkins. Must be subnets in var.vpc_id.
alb_subnet_ids =
# The ID of the AMI to run on the Jenkins server. This should be the AMI build
- # from the Packer template jenkins-ubuntu.json. One of var.ami or var.ami_filters
- # is required. Set to null if looking up the ami with filters.
+ # from the Packer template jenkins-ubuntu.json. One of var.ami or
+ # var.ami_filters is required. Set to null if looking up the ami with filters.
ami =
# Properties on the AMI that can be used to lookup a prebuilt AMI for use with
- # Jenkins. You can build the AMI using the Packer template jenkins-ubuntu.json.
- # Only used if var.ami is null. One of var.ami or var.ami_filters is required. Set
- # to null if passing the ami ID directly.
+ # Jenkins. You can build the AMI using the Packer template
+ # jenkins-ubuntu.json. Only used if var.ami is null. One of var.ami or
+ # var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
- # The domain name for the DNS A record to add for Jenkins (e.g. jenkins.foo.com).
- # Must be in the domain managed by var.hosted_zone_id.
+ # The domain name for the DNS A record to add for Jenkins (e.g.
+ # jenkins.foo.com). Must be in the domain managed by var.hosted_zone_id.
domain_name =
# The ID of the Route 53 Hosted Zone in which to create a DNS A record for
@@ -480,11 +482,12 @@ inputs = {
# The instance type to use for the Jenkins server (e.g. t2.medium)
instance_type =
- # The ID of the subnet in which to deploy Jenkins. Must be a subnet in var.vpc_id.
+ # The ID of the subnet in which to deploy Jenkins. Must be a subnet in
+ # var.vpc_id.
jenkins_subnet_id =
- # The amount of memory to give Jenkins (e.g., 1g or 512m). Used for the -Xms and
- # -Xmx settings.
+ # The amount of memory to give Jenkins (e.g., 1g or 512m). Used for the -Xms
+ # and -Xmx settings.
memory =
# The ID of the VPC in which to deploy Jenkins
@@ -494,40 +497,41 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications. Also used for the alarms if the Jenkins
- # backup job fails.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications. Also used for the alarms if the
+ # Jenkins backup job fails.
alarms_sns_topic_arn = []
- # The IP address ranges in CIDR format from which to allow incoming HTTP requests
- # to Jenkins.
+ # The IP address ranges in CIDR format from which to allow incoming HTTP
+ # requests to Jenkins.
allow_incoming_http_from_cidr_blocks = []
# The IDs of security groups from which to allow incoming HTTP requests to
# Jenkins.
allow_incoming_http_from_security_group_ids = []
- # The IP address ranges in CIDR format from which to allow incoming SSH requests
- # to Jenkins.
+ # The IP address ranges in CIDR format from which to allow incoming SSH
+ # requests to Jenkins.
allow_ssh_from_cidr_blocks = []
- # The IDs of security groups from which to allow incoming SSH requests to Jenkins.
+ # The IDs of security groups from which to allow incoming SSH requests to
+ # Jenkins.
allow_ssh_from_security_group_ids = []
- # How often, in seconds, the backup job is expected to run. This is the same as
- # var.backup_job_schedule_expression, but unfortunately, Terraform offers no way
- # to convert rate expressions to seconds. We add a CloudWatch alarm that triggers
- # if the value of var.backup_job_metric_name and var.backup_job_metric_namespace
- # isn't updated within this time period, as that indicates the backup failed to
- # run.
+ # How often, in seconds, the backup job is expected to run. This is the same
+ # as var.backup_job_schedule_expression, but unfortunately, Terraform offers
+ # no way to convert rate expressions to seconds. We add a CloudWatch alarm
+ # that triggers if the value of var.backup_job_metric_name and
+ # var.backup_job_metric_namespace isn't updated within this time period, as
+ # that indicates the backup failed to run.
backup_job_alarm_period = 86400
# The name for the CloudWatch Metric the AWS lambda backup job will increment
# every time the job completes successfully.
backup_job_metric_name = "jenkins-backup-job"
- # The namespace for the CloudWatch Metric the AWS lambda backup job will increment
- # every time the job completes successfully.
+ # The namespace for the CloudWatch Metric the AWS lambda backup job will
+ # increment every time the job completes successfully.
backup_job_metric_namespace = "Custom/Jenkins"
# A cron or rate expression that specifies how often to take a snapshot of the
@@ -544,13 +548,13 @@ inputs = {
backup_using_lambda = false
# The list of IAM actions this Jenkins server should be allowed to do: e.g.,
- # ec2:*, s3:*, etc. This should be the list of IAM permissions Jenkins needs in
- # this AWS account to run builds. These permissions will be added to the server's
- # IAM role for all resources ('*').
+ # ec2:*, s3:*, etc. This should be the list of IAM permissions Jenkins needs
+ # in this AWS account to run builds. These permissions will be added to the
+ # server's IAM role for all resources ('*').
build_permission_actions = []
- # Cloud init scripts to run on the Jenkins server when it is booting. See the part
- # blocks in
+ # Cloud init scripts to run on the Jenkins server when it is booting. See the
+ # part blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
# syntax.
cloud_init_parts = {}
@@ -560,13 +564,12 @@ inputs = {
cloudwatch_log_group_kms_key_id = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values.
cloudwatch_log_group_tags = null
# Set to true to create a public DNS A record in Route53 for Jenkins.
@@ -575,8 +578,9 @@ inputs = {
# A list of custom tags to apply to Jenkins and all other resources.
custom_tags = {}
- # The default OS user for the Jenkins AMI. For AWS Ubuntu AMIs, which is what the
- # Packer template in jenkins-ubunutu.json uses, the default OS user is 'ubuntu'.
+ # The default OS user for the Jenkins AMI. For AWS Ubuntu AMIs, which is what
+ # the Packer template in jenkins-ubuntu.json uses, the default OS user is
+ # 'ubuntu'.
default_user = "ubuntu"
# How often this lifecycle policy should be evaluated, in hours.
@@ -592,63 +596,63 @@ inputs = {
# should be evaluated. Max of 1.
dlm_backup_job_schedule_times = ["03:00"]
- # The ARN of the KMS key used for encrypting the Jenkins EBS volume. The module
- # will grant Jenkins permission to use this key.
+ # The ARN of the KMS key used for encrypting the Jenkins EBS volume. The
+ # module will grant Jenkins permission to use this key.
ebs_kms_key_arn = null
- # Whether or not the provide EBS KMS key ARN is a key alias. If providing the key
- # ID, leave this set to false.
+ # Whether or not the provided EBS KMS key ARN is a key alias. If providing the
+ # key ID, leave this set to false.
ebs_kms_key_arn_is_alias = false
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
- # Set to true to add AIM permissions to send logs to CloudWatch. This is useful in
- # combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch.
+ # Set to true to add IAM permissions to send logs to CloudWatch. This is
+ # useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch.
enable_cloudwatch_log_aggregation = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your
- # Jenkins server.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your Jenkins server.
enable_cloudwatch_metrics = true
- # Enable ip-lockdown to block access to the instance metadata. Defaults to true.
+ # Enable ip-lockdown to block access to the instance metadata. Defaults to
+ # true.
enable_ip_lockdown = true
# Set to true to add IAM permissions for ssh-grunt
- # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-
- # runt), which will allow you to manage SSH access via IAM groups.
+ # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-grunt),
+ # which will allow you to manage SSH access via IAM groups.
enable_ssh_grunt = true
# A list of IAM role ARNs in other AWS accounts that Jenkins will be able to
# assume to do automated deployment in those accounts.
external_account_auto_deploy_iam_role_arns = []
- # If you are using ssh-grunt and your IAM users / groups are defined in a separate
- # AWS account, you can use this variable to specify the ARN of an IAM role that
- # ssh-grunt can assume to retrieve IAM group and public SSH key info from that
- # account. To omit this variable, set it to an empty string (do NOT use null, or
- # Terraform will complain).
+ # If you are using ssh-grunt and your IAM users / groups are defined in a
+ # separate AWS account, you can use this variable to specify the ARN of an IAM
+ # role that ssh-grunt can assume to retrieve IAM group and public SSH key info
+ # from that account. To omit this variable, set it to an empty string (do NOT
+ # use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_asg_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_asg_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -659,29 +663,29 @@ inputs = {
# percentage above this threshold.
high_asg_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_asg_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_asg_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_memory_utilization_treat_missing_data = "missing"
- # Set to true to make the Jenkins ALB an internal ALB that cannot be accessed from
- # the public Internet. We strongly recommend setting this to true to keep Jenkins
- # more secure.
+ # Set to true to make the Jenkins ALB an internal ALB that cannot be accessed
+ # from the public Internet. We strongly recommend setting this to true to keep
+ # Jenkins more secure.
is_internal_alb = true
# The OS device name where the Jenkins EBS volume should be attached
@@ -695,9 +699,8 @@ inputs = {
# Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
# state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
jenkins_volume_alarm_treat_missing_data = "missing"
# Set to true to encrypt the Jenkins EBS volume.
@@ -707,8 +710,8 @@ inputs = {
# Jenkins server.
jenkins_volume_size = 200
- # The type of volume to use for the EBS volume used by the Jenkins server. Must be
- # one of: standard, gp2, io1, sc1, or st1.
+ # The type of volume to use for the EBS volume used by the Jenkins server.
+ # Must be one of: standard, gp2, io1, sc1, or st1.
jenkins_volume_type = "gp2"
# The name of a Key Pair that can be used to SSH to the Jenkins server. Leave
@@ -722,42 +725,43 @@ inputs = {
# standard, gp2, io1, sc1, or st1.
root_block_device_volume_type = "gp2"
- # The amount of disk space, in GB, to allocate for the root volume of this server.
- # Note that all of Jenkins' data is stored on a separate EBS Volume (see
- # var.jenkins_volume_size), so this root volume is primarily used for the OS, temp
- # folders, apps, etc.
+ # The amount of disk space, in GB, to allocate for the root volume of this
+ # server. Note that all of Jenkins' data is stored on a separate EBS Volume
+ # (see var.jenkins_volume_size), so this root volume is primarily used for the
+ # OS, temp folders, apps, etc.
root_volume_size = 100
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the EC2 instances. This is useful if you wish to customize the CloudWatch Log
- # Group with various settings such as retention periods and KMS encryption. When
- # false, the CloudWatch agent will automatically create a basic log group to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the EC2 instances. This is useful if you wish to customize the
+ # CloudWatch Log Group with various settings such as retention periods and KMS
+ # encryption. When false, the CloudWatch agent will automatically create a
+ # basic log group to use.
should_create_cloudwatch_log_group = true
- # If set to true, skip the health check, and start a rolling deployment of Jenkins
- # without waiting for it to initially be in a healthy state. This is primarily
- # useful if the server group is in a broken state and you want to force a
- # deployment anyway.
+ # If set to true, skip the health check, and start a rolling deployment of
+ # Jenkins without waiting for it to initially be in a healthy state. This is
+ # primarily useful if the server group is in a broken state and you want to
+ # force a deployment anyway.
skip_health_check = false
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this Jenkins server. This value is only used if
- # enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this Jenkins server. This value is only used
+ # if enable_ssh_grunt=true.
ssh_grunt_iam_group = "ssh-grunt-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this Jenkins server with sudo permissions. This value
- # is only used if enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this Jenkins server with sudo permissions.
+ # This value is only used if enable_ssh_grunt=true.
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
# The tenancy of this server. Must be one of: default, dedicated, or host.
tenancy = "default"
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
}
@@ -1584,11 +1588,11 @@ The ID of the Security Group attached to the Jenkins EC2 Instance
diff --git a/docs/reference/services/data-storage/amazon-aurora.md b/docs/reference/services/data-storage/amazon-aurora.md
index 64659231ce..b73d471599 100644
--- a/docs/reference/services/data-storage/amazon-aurora.md
+++ b/docs/reference/services/data-storage/amazon-aurora.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon Aurora
-View Source
+View Source
Release Notes
@@ -71,7 +71,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -79,7 +79,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the [Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/),
and it shows you how we build an end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -102,7 +102,7 @@ If you want to deploy this repo in production, check out the following resources
module "aurora" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/aurora?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/aurora?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -112,9 +112,9 @@ module "aurora" {
# contain subnets in var.vpc_id.
aurora_subnet_ids =
- # The name used to namespace all the Aurora resources created by these templates,
- # including the cluster and cluster instances (e.g. drupaldb). Must be unique in
- # this region. Must be a lowercase string.
+ # The name used to namespace all the Aurora resources created by these
+ # templates, including the cluster and cluster instances (e.g. drupaldb). Must
+ # be unique in this region. Must be a lowercase string.
name =
# The ID of the VPC in which to deploy Aurora.
@@ -124,80 +124,83 @@ module "aurora" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications. Also used for the alarms if the share
- # snapshot backup job fails.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications. Also used for the alarms if the
+ # share snapshot backup job fails.
alarms_sns_topic_arns = []
- # The list of network CIDR blocks to allow network access to Aurora from. One of
- # var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # The list of network CIDR blocks to allow network access to Aurora from. One
+ # of var.allow_connections_from_cidr_blocks or
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_cidr_blocks = []
- # The list of IDs or Security Groups to allow network access to Aurora from. All
- # security groups must either be in the VPC specified by var.vpc_id, or a peered
- # VPC with the VPC specified by var.vpc_id. One of
+ # The list of IDs or Security Groups to allow network access to Aurora from.
+ # All security groups must either be in the VPC specified by var.vpc_id, or a
+ # peered VPC with the VPC specified by var.vpc_id. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_security_groups = []
# Enable to allow major engine version upgrades when changing engine versions.
allow_major_version_upgrade = false
- # Specifies whether any cluster modifications are applied immediately, or during
- # the next maintenance window. Note that cluster modifications may cause degraded
- # performance or downtime.
+ # Specifies whether any cluster modifications are applied immediately, or
+ # during the next maintenance window. Note that cluster modifications may
+ # cause degraded performance or downtime.
apply_immediately = false
# Configure the auto minor version upgrade behavior. This is applied to the
- # cluster instances and indicates if the automatic minor version upgrade of the
- # engine is allowed. Default value is true.
+ # cluster instances and indicates if the automatic minor version upgrade of
+ # the engine is allowed. Default value is true.
auto_minor_version_upgrade = true
- # How often, in seconds, the backup job is expected to run. This is the same as
- # var.schedule_expression, but unfortunately, Terraform offers no way to convert
- # rate expressions to seconds. We add a CloudWatch alarm that triggers if the
- # metric in var.create_snapshot_cloudwatch_metric_namespace isn't updated within
- # this time period, as that indicates the backup failed to run.
+ # How often, in seconds, the backup job is expected to run. This is the same
+ # as var.schedule_expression, but unfortunately, Terraform offers no way to
+ # convert rate expressions to seconds. We add a CloudWatch alarm that triggers
+ # if the metric in var.create_snapshot_cloudwatch_metric_namespace isn't
+ # updated within this time period, as that indicates the backup failed to run.
backup_job_alarm_period = 3600
# Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
# state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
backup_job_alarm_treat_missing_data = "missing"
- # How many days to keep backup snapshots around before cleaning them up. Max: 35
+ # How many days to keep backup snapshots around before cleaning them up. Max:
+ # 35
backup_retention_period = 30
# Copy all the Aurora cluster tags to snapshots. Default is false.
copy_tags_to_snapshot = false
- # Set to true if you want a DNS record automatically created and pointed at the
- # RDS endpoints.
+ # Set to true if you want a DNS record automatically created and pointed at
+ # the RDS endpoints.
create_route53_entry = false
- # The namespace to use for the CloudWatch metric we report every time a new RDS
- # snapshot is created. We add a CloudWatch alarm on this metric to notify us if
- # the backup job fails to run for any reason. Defaults to the cluster name.
+ # The namespace to use for the CloudWatch metric we report every time a new
+ # RDS snapshot is created. We add a CloudWatch alarm on this metric to notify
+ # us if the backup job fails to run for any reason. Defaults to the cluster
+ # name.
create_snapshot_cloudwatch_metric_namespace = null
- # A map of custom tags to apply to the RDS cluster and all associated resources
- # created for it. The key is the tag name and the value is the tag value.
+ # A map of custom tags to apply to the RDS cluster and all associated
+ # resources created for it. The key is the tag name and the value is the tag
+ # value.
custom_tags = {}
- # Parameters for the cpu usage widget to output for use in a CloudWatch dashboard.
+ # Parameters for the cpu usage widget to output for use in a CloudWatch
+ # dashboard.
dashboard_cpu_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the database connections widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the database connections widget to output for use in a
+ # CloudWatch dashboard.
dashboard_db_connections_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the available disk space widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the available disk space widget to output for use in a
+ # CloudWatch dashboard.
dashboard_disk_space_widget_parameters = {"height":6,"period":60,"width":8}
# Parameters for the available memory widget to output for use in a CloudWatch
@@ -212,43 +215,42 @@ module "aurora" {
# dashboard.
dashboard_write_latency_widget_parameters = {"height":6,"period":60,"width":8}
- # Configure a custom parameter group for the RDS DB cluster. This will create a
- # new parameter group with the given parameters. When null, the database will be
- # launched with the default parameter group.
+ # Configure a custom parameter group for the RDS DB cluster. This will create
+ # a new parameter group with the given parameters. When null, the database
+ # will be launched with the default parameter group.
db_cluster_custom_parameter_group = null
- # The friendly name or ARN of an AWS Secrets Manager secret that contains database
- # configuration information in the format outlined by this document:
+ # The friendly name or ARN of an AWS Secrets Manager secret that contains
+ # database configuration information in the format outlined by this document:
# https://docs.aws.amazon.com/secretsmanager/latest/userguide/best-practices.html.
- # The engine, username, password, dbname, and port fields must be included in the
- # JSON. Note that even with this precaution, this information will be stored in
- # plaintext in the Terraform state file! See the following blog post for more
- # details:
- # https://blog.gruntwork.io/a-comprehensive-guide-to-managing-secrets-in-your-terr
- # form-code-1d586955ace1. If you do not wish to use Secrets Manager, leave this as
- # null, and use the master_username, master_password, db_name, engine, and port
- # variables.
+ # The engine, username, password, dbname, and port fields must be included in
+ # the JSON. Note that even with this precaution, this information will be
+ # stored in plaintext in the Terraform state file! See the following blog post
+ # for more details:
+ # https://blog.gruntwork.io/a-comprehensive-guide-to-managing-secrets-in-your-terraform-code-1d586955ace1.
+ # If you do not wish to use Secrets Manager, leave this as null, and use the
+ # master_username, master_password, db_name, engine, and port variables.
db_config_secrets_manager_id = null
- # Configure a custom parameter group for the RDS DB Instance. This will create a
- # new parameter group with the given parameters. When null, the database will be
- # launched with the default parameter group.
+ # Configure a custom parameter group for the RDS DB Instance. This will create
+ # a new parameter group with the given parameters. When null, the database
+ # will be launched with the default parameter group.
db_instance_custom_parameter_group = null
- # The name for your database of up to 8 alpha-numeric characters. If you do not
- # provide a name, Amazon RDS will not create a database in the DB cluster you are
- # creating. This can also be provided via AWS Secrets Manager. See the description
- # of db_config_secrets_manager_id. A value here overrides the value in
- # db_config_secrets_manager_id.
+ # The name for your database of up to 8 alpha-numeric characters. If you do
+ # not provide a name, Amazon RDS will not create a database in the DB cluster
+ # you are creating. This can also be provided via AWS Secrets Manager. See the
+ # description of db_config_secrets_manager_id. A value here overrides the
+ # value in db_config_secrets_manager_id.
db_name = null
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
- # When true, enable CloudWatch metrics for the manual snapshots created for the
- # purpose of sharing with another account.
+ # When true, enable CloudWatch metrics for the manual snapshots created for
+ # the purpose of sharing with another account.
enable_cloudwatch_metrics = true
# Enable deletion protection on the database instance. If this is enabled, the
@@ -257,8 +259,8 @@ module "aurora" {
# Set to true to enable alarms related to performance, such as read and write
# latency alarms. Set to false to disable those alarms if you aren't sure what
- # would be reasonable perf numbers for your RDS set up or if those numbers are too
- # unpredictable.
+ # would be reasonable perf numbers for your RDS set up or if those numbers are
+ # too unpredictable.
enable_perf_alarms = true
# When true, enable CloudWatch alarms for the manual snapshots created for the
@@ -266,15 +268,15 @@ module "aurora" {
# var.share_snapshot_with_another_account is true.
enable_share_snapshot_cloudwatch_alarms = true
- # If non-empty, the Aurora cluster will export the specified logs to Cloudwatch.
- # Must be zero or more of: audit, error, general and slowquery
+ # If non-empty, the Aurora cluster will export the specified logs to
+ # Cloudwatch. Must be zero or more of: audit, error, general and slowquery
enabled_cloudwatch_logs_exports = []
- # The name of the database engine to be used for this DB cluster. Valid Values:
- # aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL 5.7-compatible
- # Aurora), and aurora-postgresql. This can also be provided via AWS Secrets
- # Manager. See the description of db_config_secrets_manager_id. A value here
- # overrides the value in db_config_secrets_manager_id.
+ # The name of the database engine to be used for this DB cluster. Valid
+ # Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL
+ # 5.7-compatible Aurora), and aurora-postgresql. This can also be provided via
+ # AWS Secrets Manager. See the description of db_config_secrets_manager_id. A
+ # value here overrides the value in db_config_secrets_manager_id.
engine = null
# The version of aurora to run - provisioned or serverless.
@@ -282,80 +284,81 @@ module "aurora" {
# The Amazon Aurora DB engine version for the selected engine and engine_mode.
# Note: Starting with Aurora MySQL 2.03.2, Aurora engine versions have the
- # following syntax .mysql_aurora.. e.g.
- # 5.7.mysql_aurora.2.08.1.
+ # following syntax .mysql_aurora..
+ # e.g. 5.7.mysql_aurora.2.08.1.
engine_version = null
- # The period, in seconds, over which to measure the CPU utilization percentage.
+ # The period, in seconds, over which to measure the CPU utilization
+ # percentage.
high_cpu_utilization_period = 60
- # Trigger an alarm if the DB instance has a CPU utilization percentage above this
- # threshold.
+ # Trigger an alarm if the DB instance has a CPU utilization percentage above
+ # this threshold.
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the read latency.
high_read_latency_period = 60
- # Trigger an alarm if the DB instance read latency (average amount of time taken
- # per disk I/O operation), in seconds, is above this threshold.
+ # Trigger an alarm if the DB instance read latency (average amount of time
+ # taken per disk I/O operation), in seconds, is above this threshold.
high_read_latency_threshold = 5
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_read_latency_treat_missing_data = "missing"
# The period, in seconds, over which to measure the write latency.
high_write_latency_period = 60
- # Trigger an alarm if the DB instance write latency (average amount of time taken
- # per disk I/O operation), in seconds, is above this threshold.
+ # Trigger an alarm if the DB instance write latency (average amount of time
+ # taken per disk I/O operation), in seconds, is above this threshold.
high_write_latency_threshold = 5
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_write_latency_treat_missing_data = "missing"
# The ID of the hosted zone in which to write DNS records
hosted_zone_id = null
- # Specifies whether mappings of AWS Identity and Access Management (IAM) accounts
- # to database accounts is enabled. Disabled by default.
+ # Specifies whether mappings of AWS Identity and Access Management (IAM)
+ # accounts to database accounts is enabled. Disabled by default.
iam_database_authentication_enabled = false
- # The number of DB instances, including the primary, to run in the RDS cluster.
- # Only used when var.engine_mode is set to provisioned.
+ # The number of DB instances, including the primary, to run in the RDS
+ # cluster. Only used when var.engine_mode is set to provisioned.
instance_count = 1
# The instance type to use for the db (e.g. db.r3.large). Only used when
# var.engine_mode is set to provisioned.
instance_type = "db.t3.small"
- # The ARN of a KMS key that should be used to encrypt data on disk. Only used if
- # var.storage_encrypted is true. If you leave this null, the default RDS KMS key
- # for the account will be used.
+ # The ARN of a KMS key that should be used to encrypt data on disk. Only used
+ # if var.storage_encrypted is true. If you leave this null, the default RDS
+ # KMS key for the account will be used.
kms_key_arn = null
# The period, in seconds, over which to measure the available free disk space.
low_disk_space_available_period = 60
- # Trigger an alarm if the amount of disk space, in Bytes, on the DB instance drops
- # below this threshold.
+ # Trigger an alarm if the amount of disk space, in Bytes, on the DB instance
+ # drops below this threshold.
low_disk_space_available_threshold = 1000000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
low_disk_space_available_treat_missing_data = "missing"
# The period, in seconds, over which to measure the available free memory.
@@ -365,10 +368,10 @@ module "aurora" {
# drops below this threshold.
low_memory_available_threshold = 100000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
low_memory_available_treat_missing_data = "missing"
# The value to use for the master password of the database. This can also be
@@ -390,100 +393,103 @@ module "aurora" {
# The ARN for the KMS key to encrypt Performance Insights data.
performance_insights_kms_key_id = null
- # The port the DB will listen on (e.g. 3306). This can also be provided via AWS
- # Secrets Manager. See the description of db_config_secrets_manager_id. A value
- # here overrides the value in db_config_secrets_manager_id.
+ # The port the DB will listen on (e.g. 3306). This can also be provided via
+ # AWS Secrets Manager. See the description of db_config_secrets_manager_id. A
+ # value here overrides the value in db_config_secrets_manager_id.
port = null
# The daily time range during which automated backups are created (e.g.
- # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup runs.
+ # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup
+ # runs.
preferred_backup_window = "06:00-07:00"
- # The weekly day and time range during which cluster maintenance can occur (e.g.
- # wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or there may
- # even be a downtime during maintenance windows.
+ # The weekly day and time range during which cluster maintenance can occur
+ # (e.g. wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or
+ # there may even be a downtime during maintenance windows.
preferred_maintenance_window = "sun:07:00-sun:08:00"
- # The domain name to create a route 53 record for the primary endpoint of the RDS
- # database.
+ # The domain name to create a route 53 record for the primary endpoint of the
+ # RDS database.
primary_domain_name = null
- # If you wish to make your database accessible from the public Internet, set this
- # flag to true (WARNING: NOT RECOMMENDED FOR REGULAR USAGE!!). The default is
- # false, which means the database is only accessible from within the VPC, which is
- # much more secure. This flag MUST be false for serverless mode.
+ # If you wish to make your database accessible from the public Internet, set
+ # this flag to true (WARNING: NOT RECOMMENDED FOR REGULAR USAGE!!). The
+ # default is false, which means the database is only accessible from within
+ # the VPC, which is much more secure. This flag MUST be false for serverless
+ # mode.
publicly_accessible = false
- # The domain name to create a route 53 record for the reader endpoint of the RDS
- # database. Note that Aurora Serverless does not have reader endpoints, so this
- # option is ignored when engine_mode is set to serverless.
+ # The domain name to create a route 53 record for the reader endpoint of the
+ # RDS database. Note that Aurora Serverless does not have reader endpoints, so
+ # this option is ignored when engine_mode is set to serverless.
reader_domain_name = null
- # If non-empty, the Aurora cluster will be restored from the given source cluster
- # using the latest restorable time. Can only be used if snapshot_identifier is
- # null. For more information see
+ # If non-empty, the Aurora cluster will be restored from the given source
+ # cluster using the latest restorable time. Can only be used if
+ # snapshot_identifier is null. For more information see
# https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_PIT.html
restore_source_cluster_identifier = null
- # Only used if 'restore_source_cluster_identifier' is non-empty. Type of restore
- # to be performed. Valid options are 'full-copy' and 'copy-on-write'.
- # https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Clo
- # e.html
+ # Only used if 'restore_source_cluster_identifier' is non-empty. Type of
+ # restore to be performed. Valid options are 'full-copy' and 'copy-on-write'.
+ # https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Clone.html
restore_type = null
# Whether to enable automatic pause. A DB cluster can be paused only when it's
# idle (it has no connections). If a DB cluster is paused for more than seven
- # days, the DB cluster might be backed up with a snapshot. In this case, the DB
- # cluster is restored when there is a request to connect to it. Only used when
- # var.engine_mode is set to serverless.
+ # days, the DB cluster might be backed up with a snapshot. In this case, the
+ # DB cluster is restored when there is a request to connect to it. Only used
+ # when var.engine_mode is set to serverless.
scaling_configuration_auto_pause = true
- # The maximum capacity. The maximum capacity must be greater than or equal to the
- # minimum capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256.
- # Only used when var.engine_mode is set to serverless.
+ # The maximum capacity. The maximum capacity must be greater than or equal to
+ # the minimum capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128,
+ # and 256. Only used when var.engine_mode is set to serverless.
scaling_configuration_max_capacity = 256
scaling_configuration_max_capacity_V2 = null
- # The minimum capacity. The minimum capacity must be lesser than or equal to the
- # maximum capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256.
- # Only used when var.engine_mode is set to serverless.
+ # The minimum capacity. The minimum capacity must be lesser than or equal to
+ # the maximum capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128,
+ # and 256. Only used when var.engine_mode is set to serverless.
scaling_configuration_min_capacity = 2
scaling_configuration_min_capacity_V2 = null
- # The time, in seconds, before an Aurora DB cluster in serverless mode is paused.
- # Valid values are 300 through 86400. Only used when var.engine_mode is set to
- # serverless.
+ # The time, in seconds, before an Aurora DB cluster in serverless mode is
+ # paused. Valid values are 300 through 86400. Only used when var.engine_mode
+ # is set to serverless.
scaling_configuration_seconds_until_auto_pause = 300
- # The maximum number of snapshots to keep around for the purpose of cross account
- # sharing. Once this number is exceeded, a lambda function will delete the oldest
- # snapshots. Only used if var.share_snapshot_with_another_account is true.
+ # The maximum number of snapshots to keep around for the purpose of cross
+ # account sharing. Once this number is exceeded, a lambda function will delete
+ # the oldest snapshots. Only used if var.share_snapshot_with_another_account
+ # is true.
share_snapshot_max_snapshots = 30
# An expression that defines how often to run the lambda function to take
- # snapshots for the purpose of cross account sharing. For example, cron(0 20 * * ?
- # *) or rate(5 minutes). Required if var.share_snapshot_with_another_account is
- # true
+ # snapshots for the purpose of cross account sharing. For example, cron(0 20 *
+ # * ? *) or rate(5 minutes). Required if
+ # var.share_snapshot_with_another_account is true
share_snapshot_schedule_expression = null
- # The ID of the AWS Account that the snapshot should be shared with. Required if
- # var.share_snapshot_with_another_account is true.
+ # The ID of the AWS Account that the snapshot should be shared with. Required
+ # if var.share_snapshot_with_another_account is true.
share_snapshot_with_account_id = null
- # If set to true, take periodic snapshots of the Aurora DB that should be shared
- # with another account.
+ # If set to true, take periodic snapshots of the Aurora DB that should be
+ # shared with another account.
share_snapshot_with_another_account = false
# Determines whether a final DB snapshot is created before the DB instance is
- # deleted. Be very careful setting this to true; if you do, and you delete this DB
- # instance, you will not have any backups of the data! You almost never want to
- # set this to true, unless you are doing automated or manual testing.
+ # deleted. Be very careful setting this to true; if you do, and you delete
+ # this DB instance, you will not have any backups of the data! You almost
+ # never want to set this to true, unless you are doing automated or manual
+ # testing.
skip_final_snapshot = false
- # If non-null, the RDS Instance will be restored from the given Snapshot ID. This
- # is the Snapshot ID you'd find in the RDS console, e.g:
+ # If non-null, the RDS Instance will be restored from the given Snapshot ID.
+ # This is the Snapshot ID you'd find in the RDS console, e.g:
# rds:production-2015-06-26-06-05.
snapshot_identifier = null
@@ -492,8 +498,8 @@ module "aurora" {
# snapshots. Uses the default aws/rds key in KMS.
storage_encrypted = true
- # Trigger an alarm if the number of connections to the DB instance goes above this
- # threshold.
+ # Trigger an alarm if the number of connections to the DB instance goes above
+ # this threshold.
too_many_db_connections_threshold = null
}
@@ -516,7 +522,7 @@ module "aurora" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/aurora?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/aurora?ref=v0.104.12"
}
inputs = {
@@ -529,9 +535,9 @@ inputs = {
# contain subnets in var.vpc_id.
aurora_subnet_ids =
- # The name used to namespace all the Aurora resources created by these templates,
- # including the cluster and cluster instances (e.g. drupaldb). Must be unique in
- # this region. Must be a lowercase string.
+ # The name used to namespace all the Aurora resources created by these
+ # templates, including the cluster and cluster instances (e.g. drupaldb). Must
+ # be unique in this region. Must be a lowercase string.
name =
# The ID of the VPC in which to deploy Aurora.
@@ -541,80 +547,83 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications. Also used for the alarms if the share
- # snapshot backup job fails.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications. Also used for the alarms if the
+ # share snapshot backup job fails.
alarms_sns_topic_arns = []
- # The list of network CIDR blocks to allow network access to Aurora from. One of
- # var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # The list of network CIDR blocks to allow network access to Aurora from. One
+ # of var.allow_connections_from_cidr_blocks or
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_cidr_blocks = []
- # The list of IDs or Security Groups to allow network access to Aurora from. All
- # security groups must either be in the VPC specified by var.vpc_id, or a peered
- # VPC with the VPC specified by var.vpc_id. One of
+ # The list of IDs or Security Groups to allow network access to Aurora from.
+ # All security groups must either be in the VPC specified by var.vpc_id, or a
+ # peered VPC with the VPC specified by var.vpc_id. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_security_groups = []
# Enable to allow major engine version upgrades when changing engine versions.
allow_major_version_upgrade = false
- # Specifies whether any cluster modifications are applied immediately, or during
- # the next maintenance window. Note that cluster modifications may cause degraded
- # performance or downtime.
+ # Specifies whether any cluster modifications are applied immediately, or
+ # during the next maintenance window. Note that cluster modifications may
+ # cause degraded performance or downtime.
apply_immediately = false
# Configure the auto minor version upgrade behavior. This is applied to the
- # cluster instances and indicates if the automatic minor version upgrade of the
- # engine is allowed. Default value is true.
+ # cluster instances and indicates if the automatic minor version upgrade of
+ # the engine is allowed. Default value is true.
auto_minor_version_upgrade = true
- # How often, in seconds, the backup job is expected to run. This is the same as
- # var.schedule_expression, but unfortunately, Terraform offers no way to convert
- # rate expressions to seconds. We add a CloudWatch alarm that triggers if the
- # metric in var.create_snapshot_cloudwatch_metric_namespace isn't updated within
- # this time period, as that indicates the backup failed to run.
+ # How often, in seconds, the backup job is expected to run. This is the same
+ # as var.schedule_expression, but unfortunately, Terraform offers no way to
+ # convert rate expressions to seconds. We add a CloudWatch alarm that triggers
+ # if the metric in var.create_snapshot_cloudwatch_metric_namespace isn't
+ # updated within this time period, as that indicates the backup failed to run.
backup_job_alarm_period = 3600
# Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
# state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
backup_job_alarm_treat_missing_data = "missing"
- # How many days to keep backup snapshots around before cleaning them up. Max: 35
+ # How many days to keep backup snapshots around before cleaning them up. Max:
+ # 35
backup_retention_period = 30
# Copy all the Aurora cluster tags to snapshots. Default is false.
copy_tags_to_snapshot = false
- # Set to true if you want a DNS record automatically created and pointed at the
- # RDS endpoints.
+ # Set to true if you want a DNS record automatically created and pointed at
+ # the RDS endpoints.
create_route53_entry = false
- # The namespace to use for the CloudWatch metric we report every time a new RDS
- # snapshot is created. We add a CloudWatch alarm on this metric to notify us if
- # the backup job fails to run for any reason. Defaults to the cluster name.
+ # The namespace to use for the CloudWatch metric we report every time a new
+ # RDS snapshot is created. We add a CloudWatch alarm on this metric to notify
+ # us if the backup job fails to run for any reason. Defaults to the cluster
+ # name.
create_snapshot_cloudwatch_metric_namespace = null
- # A map of custom tags to apply to the RDS cluster and all associated resources
- # created for it. The key is the tag name and the value is the tag value.
+ # A map of custom tags to apply to the RDS cluster and all associated
+ # resources created for it. The key is the tag name and the value is the tag
+ # value.
custom_tags = {}
- # Parameters for the cpu usage widget to output for use in a CloudWatch dashboard.
+ # Parameters for the cpu usage widget to output for use in a CloudWatch
+ # dashboard.
dashboard_cpu_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the database connections widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the database connections widget to output for use in a
+ # CloudWatch dashboard.
dashboard_db_connections_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the available disk space widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the available disk space widget to output for use in a
+ # CloudWatch dashboard.
dashboard_disk_space_widget_parameters = {"height":6,"period":60,"width":8}
# Parameters for the available memory widget to output for use in a CloudWatch
@@ -629,43 +638,42 @@ inputs = {
# dashboard.
dashboard_write_latency_widget_parameters = {"height":6,"period":60,"width":8}
- # Configure a custom parameter group for the RDS DB cluster. This will create a
- # new parameter group with the given parameters. When null, the database will be
- # launched with the default parameter group.
+ # Configure a custom parameter group for the RDS DB cluster. This will create
+ # a new parameter group with the given parameters. When null, the database
+ # will be launched with the default parameter group.
db_cluster_custom_parameter_group = null
- # The friendly name or ARN of an AWS Secrets Manager secret that contains database
- # configuration information in the format outlined by this document:
+ # The friendly name or ARN of an AWS Secrets Manager secret that contains
+ # database configuration information in the format outlined by this document:
# https://docs.aws.amazon.com/secretsmanager/latest/userguide/best-practices.html.
- # The engine, username, password, dbname, and port fields must be included in the
- # JSON. Note that even with this precaution, this information will be stored in
- # plaintext in the Terraform state file! See the following blog post for more
- # details:
- # https://blog.gruntwork.io/a-comprehensive-guide-to-managing-secrets-in-your-terr
- # form-code-1d586955ace1. If you do not wish to use Secrets Manager, leave this as
- # null, and use the master_username, master_password, db_name, engine, and port
- # variables.
+ # The engine, username, password, dbname, and port fields must be included in
+ # the JSON. Note that even with this precaution, this information will be
+ # stored in plaintext in the Terraform state file! See the following blog post
+ # for more details:
+ # https://blog.gruntwork.io/a-comprehensive-guide-to-managing-secrets-in-your-terraform-code-1d586955ace1.
+ # If you do not wish to use Secrets Manager, leave this as null, and use the
+ # master_username, master_password, db_name, engine, and port variables.
db_config_secrets_manager_id = null
- # Configure a custom parameter group for the RDS DB Instance. This will create a
- # new parameter group with the given parameters. When null, the database will be
- # launched with the default parameter group.
+ # Configure a custom parameter group for the RDS DB Instance. This will create
+ # a new parameter group with the given parameters. When null, the database
+ # will be launched with the default parameter group.
db_instance_custom_parameter_group = null
- # The name for your database of up to 8 alpha-numeric characters. If you do not
- # provide a name, Amazon RDS will not create a database in the DB cluster you are
- # creating. This can also be provided via AWS Secrets Manager. See the description
- # of db_config_secrets_manager_id. A value here overrides the value in
- # db_config_secrets_manager_id.
+ # The name for your database of up to 8 alpha-numeric characters. If you do
+ # not provide a name, Amazon RDS will not create a database in the DB cluster
+ # you are creating. This can also be provided via AWS Secrets Manager. See the
+ # description of db_config_secrets_manager_id. A value here overrides the
+ # value in db_config_secrets_manager_id.
db_name = null
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
- # When true, enable CloudWatch metrics for the manual snapshots created for the
- # purpose of sharing with another account.
+ # When true, enable CloudWatch metrics for the manual snapshots created for
+ # the purpose of sharing with another account.
enable_cloudwatch_metrics = true
# Enable deletion protection on the database instance. If this is enabled, the
@@ -674,8 +682,8 @@ inputs = {
# Set to true to enable alarms related to performance, such as read and write
# latency alarms. Set to false to disable those alarms if you aren't sure what
- # would be reasonable perf numbers for your RDS set up or if those numbers are too
- # unpredictable.
+ # would be reasonable perf numbers for your RDS set up or if those numbers are
+ # too unpredictable.
enable_perf_alarms = true
# When true, enable CloudWatch alarms for the manual snapshots created for the
@@ -683,15 +691,15 @@ inputs = {
# var.share_snapshot_with_another_account is true.
enable_share_snapshot_cloudwatch_alarms = true
- # If non-empty, the Aurora cluster will export the specified logs to Cloudwatch.
- # Must be zero or more of: audit, error, general and slowquery
+ # If non-empty, the Aurora cluster will export the specified logs to
+ # CloudWatch. Must be zero or more of: audit, error, general and slowquery
enabled_cloudwatch_logs_exports = []
- # The name of the database engine to be used for this DB cluster. Valid Values:
- # aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL 5.7-compatible
- # Aurora), and aurora-postgresql. This can also be provided via AWS Secrets
- # Manager. See the description of db_config_secrets_manager_id. A value here
- # overrides the value in db_config_secrets_manager_id.
+ # The name of the database engine to be used for this DB cluster. Valid
+ # Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL
+ # 5.7-compatible Aurora), and aurora-postgresql. This can also be provided via
+ # AWS Secrets Manager. See the description of db_config_secrets_manager_id. A
+ # value here overrides the value in db_config_secrets_manager_id.
engine = null
# The version of aurora to run - provisioned or serverless.
@@ -699,80 +707,81 @@ inputs = {
# The Amazon Aurora DB engine version for the selected engine and engine_mode.
# Note: Starting with Aurora MySQL 2.03.2, Aurora engine versions have the
- # following syntax .mysql_aurora.. e.g.
- # 5.7.mysql_aurora.2.08.1.
+ # following syntax <mysql-major-version>.mysql_aurora.<aurora-mysql-version>.
+ # e.g. 5.7.mysql_aurora.2.08.1.
engine_version = null
- # The period, in seconds, over which to measure the CPU utilization percentage.
+ # The period, in seconds, over which to measure the CPU utilization
+ # percentage.
high_cpu_utilization_period = 60
- # Trigger an alarm if the DB instance has a CPU utilization percentage above this
- # threshold.
+ # Trigger an alarm if the DB instance has a CPU utilization percentage above
+ # this threshold.
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the read latency.
high_read_latency_period = 60
- # Trigger an alarm if the DB instance read latency (average amount of time taken
- # per disk I/O operation), in seconds, is above this threshold.
+ # Trigger an alarm if the DB instance read latency (average amount of time
+ # taken per disk I/O operation), in seconds, is above this threshold.
high_read_latency_threshold = 5
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_read_latency_treat_missing_data = "missing"
# The period, in seconds, over which to measure the write latency.
high_write_latency_period = 60
- # Trigger an alarm if the DB instance write latency (average amount of time taken
- # per disk I/O operation), in seconds, is above this threshold.
+ # Trigger an alarm if the DB instance write latency (average amount of time
+ # taken per disk I/O operation), in seconds, is above this threshold.
high_write_latency_threshold = 5
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_write_latency_treat_missing_data = "missing"
# The ID of the hosted zone in which to write DNS records
hosted_zone_id = null
- # Specifies whether mappings of AWS Identity and Access Management (IAM) accounts
- # to database accounts is enabled. Disabled by default.
+ # Specifies whether mappings of AWS Identity and Access Management (IAM)
+ # accounts to database accounts is enabled. Disabled by default.
iam_database_authentication_enabled = false
- # The number of DB instances, including the primary, to run in the RDS cluster.
- # Only used when var.engine_mode is set to provisioned.
+ # The number of DB instances, including the primary, to run in the RDS
+ # cluster. Only used when var.engine_mode is set to provisioned.
instance_count = 1
# The instance type to use for the db (e.g. db.r3.large). Only used when
# var.engine_mode is set to provisioned.
instance_type = "db.t3.small"
- # The ARN of a KMS key that should be used to encrypt data on disk. Only used if
- # var.storage_encrypted is true. If you leave this null, the default RDS KMS key
- # for the account will be used.
+ # The ARN of a KMS key that should be used to encrypt data on disk. Only used
+ # if var.storage_encrypted is true. If you leave this null, the default RDS
+ # KMS key for the account will be used.
kms_key_arn = null
# The period, in seconds, over which to measure the available free disk space.
low_disk_space_available_period = 60
- # Trigger an alarm if the amount of disk space, in Bytes, on the DB instance drops
- # below this threshold.
+ # Trigger an alarm if the amount of disk space, in Bytes, on the DB instance
+ # drops below this threshold.
low_disk_space_available_threshold = 1000000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
low_disk_space_available_treat_missing_data = "missing"
# The period, in seconds, over which to measure the available free memory.
@@ -782,10 +791,10 @@ inputs = {
# drops below this threshold.
low_memory_available_threshold = 100000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
low_memory_available_treat_missing_data = "missing"
# The value to use for the master password of the database. This can also be
@@ -807,100 +816,103 @@ inputs = {
# The ARN for the KMS key to encrypt Performance Insights data.
performance_insights_kms_key_id = null
- # The port the DB will listen on (e.g. 3306). This can also be provided via AWS
- # Secrets Manager. See the description of db_config_secrets_manager_id. A value
- # here overrides the value in db_config_secrets_manager_id.
+ # The port the DB will listen on (e.g. 3306). This can also be provided via
+ # AWS Secrets Manager. See the description of db_config_secrets_manager_id. A
+ # value here overrides the value in db_config_secrets_manager_id.
port = null
# The daily time range during which automated backups are created (e.g.
- # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup runs.
+ # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup
+ # runs.
preferred_backup_window = "06:00-07:00"
- # The weekly day and time range during which cluster maintenance can occur (e.g.
- # wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or there may
- # even be a downtime during maintenance windows.
+ # The weekly day and time range during which cluster maintenance can occur
+ # (e.g. wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or
+ # there may even be a downtime during maintenance windows.
preferred_maintenance_window = "sun:07:00-sun:08:00"
- # The domain name to create a route 53 record for the primary endpoint of the RDS
- # database.
+ # The domain name to create a route 53 record for the primary endpoint of the
+ # RDS database.
primary_domain_name = null
- # If you wish to make your database accessible from the public Internet, set this
- # flag to true (WARNING: NOT RECOMMENDED FOR REGULAR USAGE!!). The default is
- # false, which means the database is only accessible from within the VPC, which is
- # much more secure. This flag MUST be false for serverless mode.
+ # If you wish to make your database accessible from the public Internet, set
+ # this flag to true (WARNING: NOT RECOMMENDED FOR REGULAR USAGE!!). The
+ # default is false, which means the database is only accessible from within
+ # the VPC, which is much more secure. This flag MUST be false for serverless
+ # mode.
publicly_accessible = false
- # The domain name to create a route 53 record for the reader endpoint of the RDS
- # database. Note that Aurora Serverless does not have reader endpoints, so this
- # option is ignored when engine_mode is set to serverless.
+ # The domain name to create a route 53 record for the reader endpoint of the
+ # RDS database. Note that Aurora Serverless does not have reader endpoints, so
+ # this option is ignored when engine_mode is set to serverless.
reader_domain_name = null
- # If non-empty, the Aurora cluster will be restored from the given source cluster
- # using the latest restorable time. Can only be used if snapshot_identifier is
- # null. For more information see
+ # If non-empty, the Aurora cluster will be restored from the given source
+ # cluster using the latest restorable time. Can only be used if
+ # snapshot_identifier is null. For more information see
# https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_PIT.html
restore_source_cluster_identifier = null
- # Only used if 'restore_source_cluster_identifier' is non-empty. Type of restore
- # to be performed. Valid options are 'full-copy' and 'copy-on-write'.
- # https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Clo
- # e.html
+ # Only used if 'restore_source_cluster_identifier' is non-empty. Type of
+ # restore to be performed. Valid options are 'full-copy' and 'copy-on-write'.
+ # https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Clone.html
restore_type = null
# Whether to enable automatic pause. A DB cluster can be paused only when it's
# idle (it has no connections). If a DB cluster is paused for more than seven
- # days, the DB cluster might be backed up with a snapshot. In this case, the DB
- # cluster is restored when there is a request to connect to it. Only used when
- # var.engine_mode is set to serverless.
+ # days, the DB cluster might be backed up with a snapshot. In this case, the
+ # DB cluster is restored when there is a request to connect to it. Only used
+ # when var.engine_mode is set to serverless.
scaling_configuration_auto_pause = true
- # The maximum capacity. The maximum capacity must be greater than or equal to the
- # minimum capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256.
- # Only used when var.engine_mode is set to serverless.
+ # The maximum capacity. The maximum capacity must be greater than or equal to
+ # the minimum capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128,
+ # and 256. Only used when var.engine_mode is set to serverless.
scaling_configuration_max_capacity = 256
scaling_configuration_max_capacity_V2 = null
- # The minimum capacity. The minimum capacity must be lesser than or equal to the
- # maximum capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256.
- # Only used when var.engine_mode is set to serverless.
+ # The minimum capacity. The minimum capacity must be less than or equal to
+ # the maximum capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128,
+ # and 256. Only used when var.engine_mode is set to serverless.
scaling_configuration_min_capacity = 2
scaling_configuration_min_capacity_V2 = null
- # The time, in seconds, before an Aurora DB cluster in serverless mode is paused.
- # Valid values are 300 through 86400. Only used when var.engine_mode is set to
- # serverless.
+ # The time, in seconds, before an Aurora DB cluster in serverless mode is
+ # paused. Valid values are 300 through 86400. Only used when var.engine_mode
+ # is set to serverless.
scaling_configuration_seconds_until_auto_pause = 300
- # The maximum number of snapshots to keep around for the purpose of cross account
- # sharing. Once this number is exceeded, a lambda function will delete the oldest
- # snapshots. Only used if var.share_snapshot_with_another_account is true.
+ # The maximum number of snapshots to keep around for the purpose of cross
+ # account sharing. Once this number is exceeded, a lambda function will delete
+ # the oldest snapshots. Only used if var.share_snapshot_with_another_account
+ # is true.
share_snapshot_max_snapshots = 30
# An expression that defines how often to run the lambda function to take
- # snapshots for the purpose of cross account sharing. For example, cron(0 20 * * ?
- # *) or rate(5 minutes). Required if var.share_snapshot_with_another_account is
- # true
+ # snapshots for the purpose of cross account sharing. For example, cron(0 20 *
+ # * ? *) or rate(5 minutes). Required if
+ # var.share_snapshot_with_another_account is true
share_snapshot_schedule_expression = null
- # The ID of the AWS Account that the snapshot should be shared with. Required if
- # var.share_snapshot_with_another_account is true.
+ # The ID of the AWS Account that the snapshot should be shared with. Required
+ # if var.share_snapshot_with_another_account is true.
share_snapshot_with_account_id = null
- # If set to true, take periodic snapshots of the Aurora DB that should be shared
- # with another account.
+ # If set to true, take periodic snapshots of the Aurora DB that should be
+ # shared with another account.
share_snapshot_with_another_account = false
# Determines whether a final DB snapshot is created before the DB instance is
- # deleted. Be very careful setting this to true; if you do, and you delete this DB
- # instance, you will not have any backups of the data! You almost never want to
- # set this to true, unless you are doing automated or manual testing.
+ # deleted. Be very careful setting this to true; if you do, and you delete
+ # this DB instance, you will not have any backups of the data! You almost
+ # never want to set this to true, unless you are doing automated or manual
+ # testing.
skip_final_snapshot = false
- # If non-null, the RDS Instance will be restored from the given Snapshot ID. This
- # is the Snapshot ID you'd find in the RDS console, e.g:
+ # If non-null, the RDS Instance will be restored from the given Snapshot ID.
+ # This is the Snapshot ID you'd find in the RDS console, e.g:
# rds:production-2015-06-26-06-05.
snapshot_identifier = null
@@ -909,8 +921,8 @@ inputs = {
# snapshots. Uses the default aws/rds key in KMS.
storage_encrypted = true
- # Trigger an alarm if the number of connections to the DB instance goes above this
- # threshold.
+ # Trigger an alarm if the number of connections to the DB instance goes above
+ # this threshold.
too_many_db_connections_threshold = null
}
@@ -2235,11 +2247,11 @@ The ARN of the AWS Lambda Function used for sharing manual snapshots with second
diff --git a/docs/reference/services/data-storage/amazon-ecr-repositories.md b/docs/reference/services/data-storage/amazon-ecr-repositories.md
index 99052604c4..732698dbdb 100644
--- a/docs/reference/services/data-storage/amazon-ecr-repositories.md
+++ b/docs/reference/services/data-storage/amazon-ecr-repositories.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ECR Repositories
-View Source
+View Source
Release Notes
@@ -59,7 +59,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -67,7 +67,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -86,7 +86,7 @@ If you want to deploy this repo in production, check out the following resources
module "ecr_repos" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecr-repos?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecr-repos?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -99,8 +99,8 @@ module "ecr_repos" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Whether or not to enable image scanning on all the repos. Can be overridden on a
- # per repo basis by the enable_automatic_image_scanning property in the
+ # Whether or not to enable image scanning on all the repos. Can be overridden
+ # on a per repo basis by the enable_automatic_image_scanning property in the
# repositories map.
default_automatic_image_scanning = true
@@ -111,20 +111,21 @@ module "ecr_repos" {
default_encryption_config = {"encryption_type":"AES256","kms_key":null}
# The default list of AWS account IDs for external AWS accounts that should be
- # able to create Lambda functions based on container images in these ECR repos.
- # Can be overridden on a per repo basis by the
+ # able to create Lambda functions based on container images in these ECR
+ # repos. Can be overridden on a per repo basis by the
# external_account_ids_with_lambda_access property in the repositories map.
default_external_account_ids_with_lambda_access = []
# The default list of AWS account IDs for external AWS accounts that should be
- # able to pull images from these ECR repos. Can be overridden on a per repo basis
- # by the external_account_ids_with_read_access property in the repositories map.
+ # able to pull images from these ECR repos. Can be overridden on a per repo
+ # basis by the external_account_ids_with_read_access property in the
+ # repositories map.
default_external_account_ids_with_read_access = []
# The default list of AWS account IDs for external AWS accounts that should be
- # able to pull and push images to these ECR repos. Can be overridden on a per repo
- # basis by the external_account_ids_with_write_access property in the repositories
- # map.
+ # able to pull and push images to these ECR repos. Can be overridden on a per
+ # repo basis by the external_account_ids_with_write_access property in the
+ # repositories map.
default_external_account_ids_with_write_access = []
# The tag mutability setting for all the repos. Must be one of: MUTABLE or
@@ -135,8 +136,8 @@ module "ecr_repos" {
# Add lifecycle policy to ECR repo.
default_lifecycle_policy_rules = []
- # A map of tags (where the key and value correspond to tag keys and values) that
- # should be assigned to all ECR repositories.
+ # A map of tags (where the key and value correspond to tag keys and values)
+ # that should be assigned to all ECR repositories.
global_tags = {}
# List of regions (e.g., us-east-1) to replicate the ECR repository to.
@@ -157,7 +158,7 @@ module "ecr_repos" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecr-repos?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/ecr-repos?ref=v0.104.12"
}
inputs = {
@@ -173,8 +174,8 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Whether or not to enable image scanning on all the repos. Can be overridden on a
- # per repo basis by the enable_automatic_image_scanning property in the
+ # Whether or not to enable image scanning on all the repos. Can be overridden
+ # on a per repo basis by the enable_automatic_image_scanning property in the
# repositories map.
default_automatic_image_scanning = true
@@ -185,20 +186,21 @@ inputs = {
default_encryption_config = {"encryption_type":"AES256","kms_key":null}
# The default list of AWS account IDs for external AWS accounts that should be
- # able to create Lambda functions based on container images in these ECR repos.
- # Can be overridden on a per repo basis by the
+ # able to create Lambda functions based on container images in these ECR
+ # repos. Can be overridden on a per repo basis by the
# external_account_ids_with_lambda_access property in the repositories map.
default_external_account_ids_with_lambda_access = []
# The default list of AWS account IDs for external AWS accounts that should be
- # able to pull images from these ECR repos. Can be overridden on a per repo basis
- # by the external_account_ids_with_read_access property in the repositories map.
+ # able to pull images from these ECR repos. Can be overridden on a per repo
+ # basis by the external_account_ids_with_read_access property in the
+ # repositories map.
default_external_account_ids_with_read_access = []
# The default list of AWS account IDs for external AWS accounts that should be
- # able to pull and push images to these ECR repos. Can be overridden on a per repo
- # basis by the external_account_ids_with_write_access property in the repositories
- # map.
+ # able to pull and push images to these ECR repos. Can be overridden on a per
+ # repo basis by the external_account_ids_with_write_access property in the
+ # repositories map.
default_external_account_ids_with_write_access = []
# The tag mutability setting for all the repos. Must be one of: MUTABLE or
@@ -209,8 +211,8 @@ inputs = {
# Add lifecycle policy to ECR repo.
default_lifecycle_policy_rules = []
- # A map of tags (where the key and value correspond to tag keys and values) that
- # should be assigned to all ECR repositories.
+ # A map of tags (where the key and value correspond to tag keys and values)
+ # that should be assigned to all ECR repositories.
global_tags = {}
# List of regions (e.g., us-east-1) to replicate the ECR repository to.
@@ -460,11 +462,11 @@ A list of IAM policy actions necessary for ECR write access.
diff --git a/docs/reference/services/data-storage/amazon-elasti-cache-for-memcached.md b/docs/reference/services/data-storage/amazon-elasti-cache-for-memcached.md
index 9c62d873f2..fe9fc30ce0 100644
--- a/docs/reference/services/data-storage/amazon-elasti-cache-for-memcached.md
+++ b/docs/reference/services/data-storage/amazon-elasti-cache-for-memcached.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ElastiCache for Memcached
-View Source
+View Source
Release Notes
@@ -64,7 +64,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -72,7 +72,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -91,32 +91,33 @@ If you want to deploy this repo in production, check out the following resources
module "memcached" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/memcached?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/memcached?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Specifies whether the nodes in this Memcached node group are created in a single
- # Availability Zone or created across multiple Availability Zones in the cluster's
- # region. Valid values for this parameter are single-az or cross-az. If you want
- # to choose cross-az, num_cache_nodes must be greater than 1.
+ # Specifies whether the nodes in this Memcached node group are created in a
+ # single Availability Zone or created across multiple Availability Zones in
+ # the cluster's region. Valid values for this parameter are single-az or
+ # cross-az. If you want to choose cross-az, num_cache_nodes must be greater
+ # than 1.
az_mode =
# The compute and memory capacity of the nodes (e.g. cache.m4.large).
instance_type =
- # The name used to namespace all resources created by these templates, including
- # the ElastiCache cluster itself. Must be unique in this region. Must be a
- # lowercase string.
+ # The name used to namespace all resources created by these templates,
+ # including the ElastiCache cluster itself. Must be unique in this region.
+ # Must be a lowercase string.
name =
# The initial number of cache nodes that the cache cluster will have. Must be
# between 1 and 20.
num_cache_nodes =
- # The list of IDs of the subnets in which to deploy the ElasticCache instances.
- # The list must only contain subnets in var.vpc_id.
+ # The list of IDs of the subnets in which to deploy the ElastiCache
+ # instances. The list must only contain subnets in var.vpc_id.
subnet_ids =
# The ID of the VPC in which to deploy RDS.
@@ -126,49 +127,49 @@ module "memcached" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
alarm_treat_missing_data = "missing"
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arns = []
- # The list of network CIDR blocks to allow network access to ElastiCache from. One
- # of var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the ElastiCache
- # instances to be reachable.
+ # The list of network CIDR blocks to allow network access to ElastiCache from.
+ # One of var.allow_connections_from_cidr_blocks or
+ # var.allow_connections_from_security_groups must be specified for the
+ # ElastiCache instances to be reachable.
allow_connections_from_cidr_blocks = []
- # The list of IDs or Security Groups to allow network access to ElastiCache from.
- # All security groups must either be in the VPC specified by var.vpc_id, or a
- # peered VPC with the VPC specified by var.vpc_id. One of
+ # The list of IDs or Security Groups to allow network access to ElastiCache
+ # from. All security groups must either be in the VPC specified by var.vpc_id,
+ # or a peered VPC with the VPC specified by var.vpc_id. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the ElastiCache
- # instances to be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # ElastiCache instances to be reachable.
allow_connections_from_security_groups = []
- # Specifies whether any database modifications are applied immediately, or during
- # the next maintenance window.
+ # Specifies whether any database modifications are applied immediately, or
+ # during the next maintenance window.
apply_immediately = false
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Specifies the weekly time range for when maintenance on the cache cluster is
- # performed (e.g. sun:05:00-sun:09:00). The format is ddd:hh24:mi-ddd:hh24:mi (24H
- # Clock UTC). The minimum maintenance window is a 60 minute period.
+ # performed (e.g. sun:05:00-sun:09:00). The format is ddd:hh24:mi-ddd:hh24:mi
+ # (24H Clock UTC). The minimum maintenance window is a 60 minute period.
maintenance_window = "sat:07:00-sat:08:00"
# Version number of memcached to use (e.g. 1.5.16).
memcached_version = "1.5.16"
- # The port number on which each of the cache nodes will accept connections (e.g.
- # 11211).
+ # The port number on which each of the cache nodes will accept connections
+ # (e.g. 11211).
port = 11211
}
@@ -186,7 +187,7 @@ module "memcached" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/memcached?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/memcached?ref=v0.104.12"
}
inputs = {
@@ -195,26 +196,27 @@ inputs = {
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Specifies whether the nodes in this Memcached node group are created in a single
- # Availability Zone or created across multiple Availability Zones in the cluster's
- # region. Valid values for this parameter are single-az or cross-az. If you want
- # to choose cross-az, num_cache_nodes must be greater than 1.
+ # Specifies whether the nodes in this Memcached node group are created in a
+ # single Availability Zone or created across multiple Availability Zones in
+ # the cluster's region. Valid values for this parameter are single-az or
+ # cross-az. If you want to choose cross-az, num_cache_nodes must be greater
+ # than 1.
az_mode =
# The compute and memory capacity of the nodes (e.g. cache.m4.large).
instance_type =
- # The name used to namespace all resources created by these templates, including
- # the ElastiCache cluster itself. Must be unique in this region. Must be a
- # lowercase string.
+ # The name used to namespace all resources created by these templates,
+ # including the ElastiCache cluster itself. Must be unique in this region.
+ # Must be a lowercase string.
name =
# The initial number of cache nodes that the cache cluster will have. Must be
# between 1 and 20.
num_cache_nodes =
- # The list of IDs of the subnets in which to deploy the ElasticCache instances.
- # The list must only contain subnets in var.vpc_id.
+ # The list of IDs of the subnets in which to deploy the ElastiCache
+ # instances. The list must only contain subnets in var.vpc_id.
subnet_ids =
# The ID of the VPC in which to deploy RDS.
@@ -224,49 +226,49 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
alarm_treat_missing_data = "missing"
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arns = []
- # The list of network CIDR blocks to allow network access to ElastiCache from. One
- # of var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the ElastiCache
- # instances to be reachable.
+ # The list of network CIDR blocks to allow network access to ElastiCache from.
+ # One of var.allow_connections_from_cidr_blocks or
+ # var.allow_connections_from_security_groups must be specified for the
+ # ElastiCache instances to be reachable.
allow_connections_from_cidr_blocks = []
- # The list of IDs or Security Groups to allow network access to ElastiCache from.
- # All security groups must either be in the VPC specified by var.vpc_id, or a
- # peered VPC with the VPC specified by var.vpc_id. One of
+ # The list of IDs or Security Groups to allow network access to ElastiCache
+ # from. All security groups must either be in the VPC specified by var.vpc_id,
+ # or a peered VPC with the VPC specified by var.vpc_id. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the ElastiCache
- # instances to be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # ElastiCache instances to be reachable.
allow_connections_from_security_groups = []
- # Specifies whether any database modifications are applied immediately, or during
- # the next maintenance window.
+ # Specifies whether any database modifications are applied immediately, or
+ # during the next maintenance window.
apply_immediately = false
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Specifies the weekly time range for when maintenance on the cache cluster is
- # performed (e.g. sun:05:00-sun:09:00). The format is ddd:hh24:mi-ddd:hh24:mi (24H
- # Clock UTC). The minimum maintenance window is a 60 minute period.
+ # performed (e.g. sun:05:00-sun:09:00). The format is ddd:hh24:mi-ddd:hh24:mi
+ # (24H Clock UTC). The minimum maintenance window is a 60 minute period.
maintenance_window = "sat:07:00-sat:08:00"
# Version number of memcached to use (e.g. 1.5.16).
memcached_version = "1.5.16"
- # The port number on which each of the cache nodes will accept connections (e.g.
- # 11211).
+ # The port number on which each of the cache nodes will accept connections
+ # (e.g. 11211).
port = 11211
}
@@ -468,11 +470,11 @@ The configuration endpoint to allow host discovery.
diff --git a/docs/reference/services/data-storage/amazon-elasti-cache-for-redis.md b/docs/reference/services/data-storage/amazon-elasti-cache-for-redis.md
index d7675a3cb1..c7cc580a7f 100644
--- a/docs/reference/services/data-storage/amazon-elasti-cache-for-redis.md
+++ b/docs/reference/services/data-storage/amazon-elasti-cache-for-redis.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ElastiCache for Redis
-View Source
+View Source
Release Notes
@@ -67,7 +67,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -75,7 +75,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -94,7 +94,7 @@ If you want to deploy this repo in production, check out the following resources
module "redis" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/redis?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/redis?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -102,30 +102,30 @@ module "redis" {
# Indicates whether Multi-AZ is enabled. When Multi-AZ is enabled, a read-only
# replica is automatically promoted to a read-write primary cluster if the
- # existing primary cluster fails. If you specify true, you must specify a value
- # greater than 1 for replication_group_size.
+ # existing primary cluster fails. If you specify true, you must specify a
+ # value greater than 1 for replication_group_size.
enable_automatic_failover =
# Indicates whether Multi-AZ is enabled. When Multi-AZ is enabled, a read-only
# replica is automatically promoted to a read-write primary cluster if the
- # existing primary cluster fails. If you specify true, you must specify a value
- # greater than 1 for replication_group_size.
+ # existing primary cluster fails. If you specify true, you must specify a
+ # value greater than 1 for replication_group_size.
enable_multi_az =
# The compute and memory capacity of the nodes (e.g. cache.m4.large).
instance_type =
- # The name used to namespace all resources created by these templates, including
- # the ElastiCache cluster itself (e.g. rediscache). Must be unique in this region.
- # Must be a lowercase string.
+ # The name used to namespace all resources created by these templates,
+ # including the ElastiCache cluster itself (e.g. rediscache). Must be unique
+ # in this region. Must be a lowercase string.
name =
- # The total number of nodes in the Redis Replication Group. E.g. 1 represents just
- # the primary node, 2 represents the primary plus a single Read Replica.
+ # The total number of nodes in the Redis Replication Group. E.g. 1 represents
+ # just the primary node, 2 represents the primary plus a single Read Replica.
replication_group_size =
- # The list of IDs of the subnets in which to deploy the ElasticCache instances.
- # The list must only contain subnets in var.vpc_id.
+ # The list of IDs of the subnets in which to deploy the ElastiCache
+ # instances. The list must only contain subnets in var.vpc_id.
subnet_ids =
# The ID of the VPC in which to deploy RDS.
@@ -135,83 +135,83 @@ module "redis" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Trigger an alarm if the amount of free memory, in Bytes, on the node drops below
- # this threshold
+ # Trigger an alarm if the amount of free memory, in Bytes, on the node drops
+ # below this threshold
alarm_low_memory_available_threshold = 100000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
alarm_treat_missing_data = "missing"
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arns = []
- # The list of network CIDR blocks to allow network access to ElastiCache from. One
- # of var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the ElastiCache
- # instances to be reachable.
+ # The list of network CIDR blocks to allow network access to ElastiCache from.
+ # One of var.allow_connections_from_cidr_blocks or
+ # var.allow_connections_from_security_groups must be specified for the
+ # ElastiCache instances to be reachable.
allow_connections_from_cidr_blocks = []
- # The list of IDs or Security Groups to allow network access to ElastiCache from.
- # All security groups must either be in the VPC specified by var.vpc_id, or a
- # peered VPC with the VPC specified by var.vpc_id. One of
+ # The list of IDs or Security Groups to allow network access to ElastiCache
+ # from. All security groups must either be in the VPC specified by var.vpc_id,
+ # or a peered VPC with the VPC specified by var.vpc_id. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the ElastiCache
- # instances to be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # ElastiCache instances to be reachable.
allow_connections_from_security_groups = []
- # Specifies whether any modifications are applied immediately, or during the next
- # maintenance window.
+ # Specifies whether any modifications are applied immediately, or during the
+ # next maintenance window.
apply_immediately = false
- # The password used to access a password protected server. Can be specified only
- # if transit_encryption_enabled = true. Must contain from 16 to 128 alphanumeric
- # characters or symbols (excluding @, , and /)
+ # The password used to access a password protected server. Can be specified
+ # only if transit_encryption_enabled = true. Must contain from 16 to 128
+ # alphanumeric characters or symbols (excluding @, ", and /)
auth_token = null
- # Specifies the number of shards and replicas per shard in the cluster. The list
- # should contain a single map with keys 'num_node_groups' and
+ # Specifies the number of shards and replicas per shard in the cluster. The
+ # list should contain a single map with keys 'num_node_groups' and
# 'replicas_per_node_group' set to desired integer values.
cluster_mode = []
# Whether to enable encryption at rest.
enable_at_rest_encryption = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Whether to enable encryption in transit.
enable_transit_encryption = true
# Specifies the weekly time range for when maintenance on the cache cluster is
- # performed (e.g. sun:05:00-sun:09:00). The format is ddd:hh24:mi-ddd:hh24:mi (24H
- # Clock UTC). The minimum maintenance window is a 60 minute period.
+ # performed (e.g. sun:05:00-sun:09:00). The format is ddd:hh24:mi-ddd:hh24:mi
+ # (24H Clock UTC). The minimum maintenance window is a 60 minute period.
maintenance_window = "sat:07:00-sat:08:00"
- # Name of the parameter group to associate with this cache cluster. This can be
- # used to configure custom settings for the cluster.
+ # Name of the parameter group to associate with this cache cluster. This can
+ # be used to configure custom settings for the cluster.
parameter_group_name = null
- # The port number on which each of the cache nodes will accept connections (e.g.
- # 6379).
+ # The port number on which each of the cache nodes will accept connections
+ # (e.g. 6379).
port = 6379
# Version number of redis to use (e.g. 5.0.6).
redis_version = "5.0.6"
- # The Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
- # You can use this parameter to restore from an externally created snapshot. If
- # you have an ElastiCache snapshot, use snapshot_name.
+ # The Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon
+ # S3. You can use this parameter to restore from an externally created
+ # snapshot. If you have an ElastiCache snapshot, use snapshot_name.
snapshot_arn = null
- # The name of a snapshot from which to restore the Redis cluster. You can use this
- # to restore from an ElastiCache snapshot. If you have an externally created
- # snapshot, use snapshot_arn.
+ # The name of a snapshot from which to restore the Redis cluster. You can use
+ # this to restore from an ElastiCache snapshot. If you have an externally
+ # created snapshot, use snapshot_arn.
snapshot_name = null
# The number of days for which ElastiCache will retain automatic cache cluster
@@ -219,14 +219,14 @@ module "redis" {
snapshot_retention_limit = 15
# The daily time range during which automated backups are created (e.g.
- # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup runs.
- # Set to empty string to disable snapshots.
+ # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup
+ # runs. Set to empty string to disable snapshots.
snapshot_window = "06:00-07:00"
- # The ARN of the SNS Topic to which notifications will be sent when a Replication
- # Group event happens, such as an automatic failover (e.g.
- # arn:aws:sns:*:123456789012:my_sns_topic). An empty string is a valid value if
- # you do not wish to receive notifications via SNS.
+ # The ARN of the SNS Topic to which notifications will be sent when a
+ # Replication Group event happens, such as an automatic failover (e.g.
+ # arn:aws:sns:*:123456789012:my_sns_topic). An empty string is a valid value
+ # if you do not wish to receive notifications via SNS.
sns_topic_for_notifications = ""
# A set of tags to set for the ElastiCache Replication Group.
@@ -247,7 +247,7 @@ module "redis" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/redis?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/redis?ref=v0.104.12"
}
inputs = {
@@ -258,30 +258,30 @@ inputs = {
# Indicates whether Multi-AZ is enabled. When Multi-AZ is enabled, a read-only
# replica is automatically promoted to a read-write primary cluster if the
- # existing primary cluster fails. If you specify true, you must specify a value
- # greater than 1 for replication_group_size.
+ # existing primary cluster fails. If you specify true, you must specify a
+ # value greater than 1 for replication_group_size.
enable_automatic_failover =
# Indicates whether Multi-AZ is enabled. When Multi-AZ is enabled, a read-only
# replica is automatically promoted to a read-write primary cluster if the
- # existing primary cluster fails. If you specify true, you must specify a value
- # greater than 1 for replication_group_size.
+ # existing primary cluster fails. If you specify true, you must specify a
+ # value greater than 1 for replication_group_size.
enable_multi_az =
# The compute and memory capacity of the nodes (e.g. cache.m4.large).
instance_type =
- # The name used to namespace all resources created by these templates, including
- # the ElastiCache cluster itself (e.g. rediscache). Must be unique in this region.
- # Must be a lowercase string.
+ # The name used to namespace all resources created by these templates,
+ # including the ElastiCache cluster itself (e.g. rediscache). Must be unique
+ # in this region. Must be a lowercase string.
name =
- # The total number of nodes in the Redis Replication Group. E.g. 1 represents just
- # the primary node, 2 represents the primary plus a single Read Replica.
+ # The total number of nodes in the Redis Replication Group. E.g. 1 represents
+ # just the primary node, 2 represents the primary plus a single Read Replica.
replication_group_size =
- # The list of IDs of the subnets in which to deploy the ElasticCache instances.
- # The list must only contain subnets in var.vpc_id.
+ # The list of IDs of the subnets in which to deploy the ElastiCache
+ # instances. The list must only contain subnets in var.vpc_id.
subnet_ids =
# The ID of the VPC in which to deploy RDS.
@@ -291,83 +291,83 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Trigger an alarm if the amount of free memory, in Bytes, on the node drops below
- # this threshold
+ # Trigger an alarm if the amount of free memory, in Bytes, on the node drops
+ # below this threshold
alarm_low_memory_available_threshold = 100000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
alarm_treat_missing_data = "missing"
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arns = []
- # The list of network CIDR blocks to allow network access to ElastiCache from. One
- # of var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the ElastiCache
- # instances to be reachable.
+ # The list of network CIDR blocks to allow network access to ElastiCache from.
+ # One of var.allow_connections_from_cidr_blocks or
+ # var.allow_connections_from_security_groups must be specified for the
+ # ElastiCache instances to be reachable.
allow_connections_from_cidr_blocks = []
- # The list of IDs or Security Groups to allow network access to ElastiCache from.
- # All security groups must either be in the VPC specified by var.vpc_id, or a
- # peered VPC with the VPC specified by var.vpc_id. One of
+ # The list of IDs or Security Groups to allow network access to ElastiCache
+ # from. All security groups must either be in the VPC specified by var.vpc_id,
+ # or a peered VPC with the VPC specified by var.vpc_id. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the ElastiCache
- # instances to be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # ElastiCache instances to be reachable.
allow_connections_from_security_groups = []
- # Specifies whether any modifications are applied immediately, or during the next
- # maintenance window.
+ # Specifies whether any modifications are applied immediately, or during the
+ # next maintenance window.
apply_immediately = false
- # The password used to access a password protected server. Can be specified only
- # if transit_encryption_enabled = true. Must contain from 16 to 128 alphanumeric
- # characters or symbols (excluding @, , and /)
+ # The password used to access a password protected server. Can be specified
+ # only if transit_encryption_enabled = true. Must contain from 16 to 128
+ # alphanumeric characters or symbols (excluding @, ", and /)
auth_token = null
- # Specifies the number of shards and replicas per shard in the cluster. The list
- # should contain a single map with keys 'num_node_groups' and
+ # Specifies the number of shards and replicas per shard in the cluster. The
+ # list should contain a single map with keys 'num_node_groups' and
# 'replicas_per_node_group' set to desired integer values.
cluster_mode = []
# Whether to enable encryption at rest.
enable_at_rest_encryption = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Whether to enable encryption in transit.
enable_transit_encryption = true
# Specifies the weekly time range for when maintenance on the cache cluster is
- # performed (e.g. sun:05:00-sun:09:00). The format is ddd:hh24:mi-ddd:hh24:mi (24H
- # Clock UTC). The minimum maintenance window is a 60 minute period.
+ # performed (e.g. sun:05:00-sun:09:00). The format is ddd:hh24:mi-ddd:hh24:mi
+ # (24H Clock UTC). The minimum maintenance window is a 60 minute period.
maintenance_window = "sat:07:00-sat:08:00"
- # Name of the parameter group to associate with this cache cluster. This can be
- # used to configure custom settings for the cluster.
+ # Name of the parameter group to associate with this cache cluster. This can
+ # be used to configure custom settings for the cluster.
parameter_group_name = null
- # The port number on which each of the cache nodes will accept connections (e.g.
- # 6379).
+ # The port number on which each of the cache nodes will accept connections
+ # (e.g. 6379).
port = 6379
# Version number of redis to use (e.g. 5.0.6).
redis_version = "5.0.6"
- # The Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
- # You can use this parameter to restore from an externally created snapshot. If
- # you have an ElastiCache snapshot, use snapshot_name.
+ # The Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon
+ # S3. You can use this parameter to restore from an externally created
+ # snapshot. If you have an ElastiCache snapshot, use snapshot_name.
snapshot_arn = null
- # The name of a snapshot from which to restore the Redis cluster. You can use this
- # to restore from an ElastiCache snapshot. If you have an externally created
- # snapshot, use snapshot_arn.
+ # The name of a snapshot from which to restore the Redis cluster. You can use
+ # this to restore from an ElastiCache snapshot. If you have an externally
+ # created snapshot, use snapshot_arn.
snapshot_name = null
# The number of days for which ElastiCache will retain automatic cache cluster
@@ -375,14 +375,14 @@ inputs = {
snapshot_retention_limit = 15
# The daily time range during which automated backups are created (e.g.
- # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup runs.
- # Set to empty string to disable snapshots.
+ # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup
+ # runs. Set to empty string to disable snapshots.
snapshot_window = "06:00-07:00"
- # The ARN of the SNS Topic to which notifications will be sent when a Replication
- # Group event happens, such as an automatic failover (e.g.
- # arn:aws:sns:*:123456789012:my_sns_topic). An empty string is a valid value if
- # you do not wish to receive notifications via SNS.
+ # The ARN of the SNS Topic to which notifications will be sent when a
+ # Replication Group event happens, such as an automatic failover (e.g.
+ # arn:aws:sns:*:123456789012:my_sns_topic). An empty string is a valid value
+ # if you do not wish to receive notifications via SNS.
sns_topic_for_notifications = ""
# A set of tags to set for the ElastiCache Replication Group.
@@ -741,11 +741,11 @@ Security Group ID used for redis cluster.
diff --git a/docs/reference/services/data-storage/amazon-elasticsearch.md b/docs/reference/services/data-storage/amazon-elasticsearch.md
index 955fecbe9b..3664a6329c 100644
--- a/docs/reference/services/data-storage/amazon-elasticsearch.md
+++ b/docs/reference/services/data-storage/amazon-elasticsearch.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon Elasticsearch Service
-View Source
+View Source
Release Notes
@@ -63,7 +63,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -76,7 +76,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the [Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/),
and it shows you how we build an end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -101,49 +101,49 @@ If you want to deploy this repo in production, check out the following resources
module "elasticsearch" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/elasticsearch?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/elasticsearch?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
# The name of the Elasticsearch cluster. It must be unique to your account and
- # region, start with a lowercase letter, contain between 3 and 28 characters, and
- # contain only lowercase letters a-z, the numbers 0-9, and the hyphen (-).
+ # region, start with a lowercase letter, contain between 3 and 28 characters,
+ # and contain only lowercase letters a-z, the numbers 0-9, and the hyphen (-).
domain_name =
- # The number of instances to deploy in the Elasticsearch cluster. This must be an
- # even number if zone_awareness_enabled is true.
+ # The number of instances to deploy in the Elasticsearch cluster. This must be
+ # an even number if zone_awareness_enabled is true.
instance_count =
# The instance type to use for Elasticsearch data nodes (e.g.,
- # t2.small.elasticsearch, or m4.large.elasticsearch). For supported instance types
- # see
- # https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-supp
- # rted-instance-types.html.
+ # t2.small.elasticsearch, or m4.large.elasticsearch). For supported instance
+ # types see
+ # https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-supported-instance-types.html.
instance_type =
# The size in GiB of the EBS volume for each node in the cluster (e.g. 10, or
# 512). For volume size limits see
- # https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-limi
- # s.html.
+ # https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-limits.html.
volume_size =
- # The type of EBS volumes to use in the cluster. Must be one of: standard, gp2,
- # io1, sc1, or st1. For a comparison of EBS volume types, see
+ # The type of EBS volumes to use in the cluster. Must be one of: standard,
+ # gp2, io1, sc1, or st1. For a comparison of EBS volume types, see
# https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-volume-types.html.
volume_type =
- # Whether to deploy the Elasticsearch nodes across two Availability Zones instead
- # of one. Note that if you enable this, the instance_count MUST be an even number.
+ # Whether to deploy the Elasticsearch nodes across two Availability Zones
+ # instead of one. Note that if you enable this, the instance_count MUST be an
+ # even number.
zone_awareness_enabled =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Key-value string pairs to specify advanced configuration options. Note that the
- # values for these configuration options must be strings (wrapped in quotes).
+ # Key-value string pairs to specify advanced configuration options. Note that
+ # the values for these configuration options must be strings (wrapped in
+ # quotes).
advanced_options = {}
# Enable fine grain access control
@@ -153,22 +153,23 @@ module "elasticsearch" {
# Elasticsearch cluster.
alarm_sns_topic_arns = []
- # The list of network CIDR blocks to allow network access to Aurora from. One of
- # var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # The list of network CIDR blocks to allow network access to Aurora from. One
+ # of var.allow_connections_from_cidr_blocks or
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_cidr_blocks = []
- # The list of IDs or Security Groups to allow network access to Aurora from. All
- # security groups must either be in the VPC specified by var.vpc_id, or a peered
- # VPC with the VPC specified by var.vpc_id. One of
+ # The list of IDs or Security Groups to allow network access to Aurora from.
+ # All security groups must either be in the VPC specified by var.vpc_id, or a
+ # peered VPC with the VPC specified by var.vpc_id. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_security_groups = []
- # Hour during which the service takes an automated daily snapshot of the indices
- # in the domain. This setting has no effect on Elasticsearch 5.3 and later.
+ # Hour during which the service takes an automated daily snapshot of the
+ # indices in the domain. This setting has no effect on Elasticsearch 5.3 and
+ # later.
automated_snapshot_start_hour = 0
# Number of Availability Zones for the domain to use with
@@ -178,14 +179,14 @@ module "elasticsearch" {
# The period, in seconds, over which to measure the CPU utilization percentage
cluster_high_cpu_utilization_period = 60
- # Trigger an alarm if the Elasticsearch cluster has a CPU utilization percentage
- # above this threshold
+ # Trigger an alarm if the Elasticsearch cluster has a CPU utilization
+ # percentage above this threshold
cluster_high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_high_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the JVM heap usage percentage
@@ -194,84 +195,85 @@ module "elasticsearch" {
# Trigger an alarm if the JVM heap usage percentage goes above this threshold
cluster_high_jvm_memory_pressure_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_high_jvm_memory_pressure_treat_missing_data = "missing"
- # The maximum amount of time, in seconds, that ClusterIndexWritesBlocked can be in
- # red status before triggering an alarm
+ # The maximum amount of time, in seconds, that ClusterIndexWritesBlocked can
+ # be in red status before triggering an alarm
cluster_index_writes_blocked_period = 300
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_index_writes_blocked_treat_missing_data = "missing"
# The period, in seconds, over which to measure the CPU credit balance
cluster_low_cpu_credit_balance_period = 60
- # Trigger an alarm if the CPU credit balance drops below this threshold. Only used
- # if var.instance_type is t2.xxx.
+ # Trigger an alarm if the CPU credit balance drops below this threshold. Only
+ # used if var.instance_type is t2.xxx.
cluster_low_cpu_credit_balance_threshold = 10
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_low_cpu_credit_balance_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the available free storage space
+ # The period, in seconds, over which to measure the available free storage
+ # space
cluster_low_free_storage_space_period = 60
# Trigger an alarm if the amount of free storage space, in Megabytes, on the
# Elasticsearch cluster drops below this threshold
cluster_low_free_storage_space_threshold = 1024
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_low_free_storage_space_treat_missing_data = "missing"
- # The maximum amount of time, in seconds, during with the AutomatedSnapshotFailure
- # can be in red status before triggering an alarm
+ # The maximum amount of time, in seconds, during which the
+ # AutomatedSnapshotFailure can be in red status before triggering an alarm
cluster_snapshot_period = 60
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_snapshot_treat_missing_data = "missing"
- # The maximum amount of time, in seconds, during which the cluster can be in red
- # status before triggering an alarm
+ # The maximum amount of time, in seconds, during which the cluster can be in
+ # red status before triggering an alarm
cluster_status_red_period = 300
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_status_red_treat_missing_data = "missing"
# The maximum amount of time, in seconds, during which the cluster can be in
# yellow status before triggering an alarm
cluster_status_yellow_period = 300
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_status_yellow_treat_missing_data = "missing"
# Whether or not the Service Linked Role for Elasticsearch should be created
- # within this module. Normally the service linked role is created automatically by
- # AWS when creating the Elasticsearch domain in the web console, but API does not
- # implement this logic. You can either have AWS automatically manage this by
- # creating a domain manually in the console, or manage it in terraform using the
- # landing zone modules or this variable.
+ # within this module. Normally the service linked role is created
+ # automatically by AWS when creating the Elasticsearch domain in the web
+ # console, but API does not implement this logic. You can either have AWS
+ # automatically manage this by creating a domain manually in the console, or
+ # manage it in terraform using the landing zone modules or this variable.
create_service_linked_role = false
# Fully qualified domain for your custom endpoint.
@@ -283,18 +285,19 @@ module "elasticsearch" {
# Whether to enable custom endpoint for the Elasticsearch domain.
custom_endpoint_enabled = false
- # A map of custom tags to apply to the ElasticSearch Domain. The key is the tag
- # name and the value is the tag value.
+ # A map of custom tags to apply to the ElasticSearch Domain. The key is the
+ # tag name and the value is the tag value.
custom_tags = {}
- # The number of dedicated master nodes to run. We recommend setting this to 3 for
- # production deployments. Only used if var.dedicated_master_enabled is true.
+ # The number of dedicated master nodes to run. We recommend setting this to 3
+ # for production deployments. Only used if var.dedicated_master_enabled is
+ # true.
dedicated_master_count = null
- # Whether to deploy separate nodes specifically for performing cluster management
- # tasks (e.g. tracking number of nodes, monitoring health, replicating changes).
- # This increases the stability of large clusters and is required for clusters with
- # more than 10 nodes.
+ # Whether to deploy separate nodes specifically for performing cluster
+ # management tasks (e.g. tracking number of nodes, monitoring health,
+ # replicating changes). This increases the stability of large clusters and is
+ # required for clusters with more than 10 nodes.
dedicated_master_enabled = false
# The instance type for the dedicated master nodes. These nodes can use a
@@ -309,28 +312,29 @@ module "elasticsearch" {
# The version of Elasticsearch to deploy.
elasticsearch_version = "7.7"
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arns.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arns.
enable_cloudwatch_alarms = true
- # False by default because encryption at rest is not included in the free tier.
- # When true, the Elasticsearch domain storage will be encrypted at rest using the
- # KMS key described with var.encryption_kms_key_id. We strongly recommend
- # configuring a custom KMS key instead of using the shared service key for a
- # better security posture when configuring encryption at rest.
+ # False by default because encryption at rest is not included in the free
+ # tier. When true, the Elasticsearch domain storage will be encrypted at rest
+ # using the KMS key described with var.encryption_kms_key_id. We strongly
+ # recommend configuring a custom KMS key instead of using the shared service
+ # key for a better security posture when configuring encryption at rest.
enable_encryption_at_rest = true
# Whether to enable node-to-node encryption.
enable_node_to_node_encryption = true
- # The ID of the KMS key to use to encrypt the Elasticsearch domain storage. Only
- # used if enable_encryption_at_rest. When null, uses the aws/es service KMS key.
+ # The ID of the KMS key to use to encrypt the Elasticsearch domain storage.
+ # Only used if enable_encryption_at_rest. When null, uses the aws/es service
+ # KMS key.
encryption_kms_key_id = null
# The ARNS of the IAM users and roles to which to allow full access to the
- # Elasticsearch cluster. Setting this to a restricted list is useful when using a
- # public access cluster.
+ # Elasticsearch cluster. Setting this to a restricted list is useful when
+ # using a public access cluster.
iam_principal_arns = ["*"]
# Whether the internal user database is enabled. Enable this to use master
@@ -338,43 +342,45 @@ module "elasticsearch" {
internal_user_database_enabled = false
# The baseline input/output (I/O) performance of EBS volumes attached to data
- # nodes. Must be between 1000 and 4000. Applicable only if var.volume_type is io1.
+ # nodes. Must be between 1000 and 4000. Applicable only if var.volume_type is
+ # io1.
iops = null
# Whether the cluster is publicly accessible.
is_public = false
- # The maximum amount of time, in seconds, that KMSKeyError can be in red status
- # before triggering an alarm
+ # The maximum amount of time, in seconds, that KMSKeyError can be in red
+ # status before triggering an alarm
kms_key_error_period = 60
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
kms_key_error_treat_missing_data = "missing"
- # The maximum amount of time, in seconds, that KMSKeyInaccessible can be in red
- # status before triggering an alarm
+ # The maximum amount of time, in seconds, that KMSKeyInaccessible can be in
+ # red status before triggering an alarm
kms_key_inaccessible_period = 60
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
kms_key_inaccessible_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the master nodes' CPU utilization
+ # The period, in seconds, over which to measure the master nodes' CPU
+ # utilization
master_cpu_utilization_period = 900
# Trigger an alarm if the Elasticsearch cluster master nodes have a CPU
# utilization percentage above this threshold
master_cpu_utilization_threshold = 50
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
master_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the master nodes' JVM memory
@@ -385,10 +391,10 @@ module "elasticsearch" {
# pressure percentage above this threshold
master_jvm_memory_pressure_threshold = 80
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
master_jvm_memory_pressure_treat_missing_data = "missing"
# ARN of the master user. Only used if advanced_security_options and
@@ -400,8 +406,8 @@ module "elasticsearch" {
master_user_name = null
# Master account user password. Only used if advanced_security_options and
- # internal_user_database_enabled are set to true. WARNING: this password will be
- # stored in Terraform state.
+ # internal_user_database_enabled are set to true. WARNING: this password will
+ # be stored in Terraform state.
master_user_password = null # SENSITIVE
# Whether to monitor KMS key statistics
@@ -410,19 +416,20 @@ module "elasticsearch" {
# Whether to monitor master node statistics
monitor_master_nodes = false
- # The period, in seconds, over which to measure the master nodes' CPU utilization
+ # The period, in seconds, over which to measure the master nodes' CPU
+ # utilization
node_count_period = 86400
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
node_count_treat_missing_data = "missing"
- # List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created in.
- # If var.zone_awareness_enabled is true, the first 2 or 3 provided subnet ids are
- # used, depending on var.availability_zone_count. Otherwise only the first one is
- # used.
+ # List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created
+ # in. If var.zone_awareness_enabled is true, the first 2 or 3 provided subnet
+ # ids are used, depending on var.availability_zone_count. Otherwise only the
+ # first one is used.
subnet_ids = []
# The name of the TLS security policy that needs to be applied to the HTTPS
@@ -431,14 +438,15 @@ module "elasticsearch" {
# configured.
tls_security_policy = "Policy-Min-TLS-1-2-2019-07"
- # How long to wait for updates to the ES cluster before timing out and reporting
- # an error.
+ # How long to wait for updates to the ES cluster before timing out and
+ # reporting an error.
update_timeout = "90m"
# The id of the VPC to deploy into. It must be in the same region as the
# Elasticsearch domain and its tenancy must be set to Default. If
- # zone_awareness_enabled is false, the Elasticsearch cluster will have an endpoint
- # in one subnet of the VPC; otherwise it will have endpoints in two subnets.
+ # zone_awareness_enabled is false, the Elasticsearch cluster will have an
+ # endpoint in one subnet of the VPC; otherwise it will have endpoints in two
+ # subnets.
vpc_id = null
}
@@ -461,7 +469,7 @@ module "elasticsearch" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/elasticsearch?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/elasticsearch?ref=v0.104.12"
}
inputs = {
@@ -471,42 +479,42 @@ inputs = {
# ----------------------------------------------------------------------------------------------------
# The name of the Elasticsearch cluster. It must be unique to your account and
- # region, start with a lowercase letter, contain between 3 and 28 characters, and
- # contain only lowercase letters a-z, the numbers 0-9, and the hyphen (-).
+ # region, start with a lowercase letter, contain between 3 and 28 characters,
+ # and contain only lowercase letters a-z, the numbers 0-9, and the hyphen (-).
domain_name =
- # The number of instances to deploy in the Elasticsearch cluster. This must be an
- # even number if zone_awareness_enabled is true.
+ # The number of instances to deploy in the Elasticsearch cluster. This must be
+ # an even number if zone_awareness_enabled is true.
instance_count =
# The instance type to use for Elasticsearch data nodes (e.g.,
- # t2.small.elasticsearch, or m4.large.elasticsearch). For supported instance types
- # see
- # https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-supp
- # rted-instance-types.html.
+ # t2.small.elasticsearch, or m4.large.elasticsearch). For supported instance
+ # types see
+ # https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-supported-instance-types.html.
instance_type =
# The size in GiB of the EBS volume for each node in the cluster (e.g. 10, or
# 512). For volume size limits see
- # https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-limi
- # s.html.
+ # https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-limits.html.
volume_size =
- # The type of EBS volumes to use in the cluster. Must be one of: standard, gp2,
- # io1, sc1, or st1. For a comparison of EBS volume types, see
+ # The type of EBS volumes to use in the cluster. Must be one of: standard,
+ # gp2, io1, sc1, or st1. For a comparison of EBS volume types, see
# https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-volume-types.html.
volume_type =
- # Whether to deploy the Elasticsearch nodes across two Availability Zones instead
- # of one. Note that if you enable this, the instance_count MUST be an even number.
+ # Whether to deploy the Elasticsearch nodes across two Availability Zones
+ # instead of one. Note that if you enable this, the instance_count MUST be an
+ # even number.
zone_awareness_enabled =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Key-value string pairs to specify advanced configuration options. Note that the
- # values for these configuration options must be strings (wrapped in quotes).
+ # Key-value string pairs to specify advanced configuration options. Note that
+ # the values for these configuration options must be strings (wrapped in
+ # quotes).
advanced_options = {}
# Enable fine grain access control
@@ -516,22 +524,23 @@ inputs = {
# Elasticsearch cluster.
alarm_sns_topic_arns = []
- # The list of network CIDR blocks to allow network access to Aurora from. One of
- # var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # The list of network CIDR blocks to allow network access to Aurora from. One
+ # of var.allow_connections_from_cidr_blocks or
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_cidr_blocks = []
- # The list of IDs or Security Groups to allow network access to Aurora from. All
- # security groups must either be in the VPC specified by var.vpc_id, or a peered
- # VPC with the VPC specified by var.vpc_id. One of
+ # The list of IDs or Security Groups to allow network access to Aurora from.
+ # All security groups must either be in the VPC specified by var.vpc_id, or a
+ # peered VPC with the VPC specified by var.vpc_id. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_security_groups = []
- # Hour during which the service takes an automated daily snapshot of the indices
- # in the domain. This setting has no effect on Elasticsearch 5.3 and later.
+ # Hour during which the service takes an automated daily snapshot of the
+ # indices in the domain. This setting has no effect on Elasticsearch 5.3 and
+ # later.
automated_snapshot_start_hour = 0
# Number of Availability Zones for the domain to use with
@@ -541,14 +550,14 @@ inputs = {
# The period, in seconds, over which to measure the CPU utilization percentage
cluster_high_cpu_utilization_period = 60
- # Trigger an alarm if the Elasticsearch cluster has a CPU utilization percentage
- # above this threshold
+ # Trigger an alarm if the Elasticsearch cluster has a CPU utilization
+ # percentage above this threshold
cluster_high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_high_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the JVM heap usage percentage
@@ -557,84 +566,85 @@ inputs = {
# Trigger an alarm if the JVM heap usage percentage goes above this threshold
cluster_high_jvm_memory_pressure_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_high_jvm_memory_pressure_treat_missing_data = "missing"
- # The maximum amount of time, in seconds, that ClusterIndexWritesBlocked can be in
- # red status before triggering an alarm
+ # The maximum amount of time, in seconds, that ClusterIndexWritesBlocked can
+ # be in red status before triggering an alarm
cluster_index_writes_blocked_period = 300
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_index_writes_blocked_treat_missing_data = "missing"
# The period, in seconds, over which to measure the CPU credit balance
cluster_low_cpu_credit_balance_period = 60
- # Trigger an alarm if the CPU credit balance drops below this threshold. Only used
- # if var.instance_type is t2.xxx.
+ # Trigger an alarm if the CPU credit balance drops below this threshold. Only
+ # used if var.instance_type is t2.xxx.
cluster_low_cpu_credit_balance_threshold = 10
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_low_cpu_credit_balance_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the available free storage space
+ # The period, in seconds, over which to measure the available free storage
+ # space
cluster_low_free_storage_space_period = 60
# Trigger an alarm if the amount of free storage space, in Megabytes, on the
# Elasticsearch cluster drops below this threshold
cluster_low_free_storage_space_threshold = 1024
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_low_free_storage_space_treat_missing_data = "missing"
- # The maximum amount of time, in seconds, during with the AutomatedSnapshotFailure
- # can be in red status before triggering an alarm
+ # The maximum amount of time, in seconds, during which the
+ # AutomatedSnapshotFailure can be in red status before triggering an alarm
cluster_snapshot_period = 60
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_snapshot_treat_missing_data = "missing"
- # The maximum amount of time, in seconds, during which the cluster can be in red
- # status before triggering an alarm
+ # The maximum amount of time, in seconds, during which the cluster can be in
+ # red status before triggering an alarm
cluster_status_red_period = 300
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_status_red_treat_missing_data = "missing"
# The maximum amount of time, in seconds, during which the cluster can be in
# yellow status before triggering an alarm
cluster_status_yellow_period = 300
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
cluster_status_yellow_treat_missing_data = "missing"
# Whether or not the Service Linked Role for Elasticsearch should be created
- # within this module. Normally the service linked role is created automatically by
- # AWS when creating the Elasticsearch domain in the web console, but API does not
- # implement this logic. You can either have AWS automatically manage this by
- # creating a domain manually in the console, or manage it in terraform using the
- # landing zone modules or this variable.
+ # within this module. Normally the service linked role is created
+ # automatically by AWS when creating the Elasticsearch domain in the web
+ # console, but API does not implement this logic. You can either have AWS
+ # automatically manage this by creating a domain manually in the console, or
+ # manage it in terraform using the landing zone modules or this variable.
create_service_linked_role = false
# Fully qualified domain for your custom endpoint.
@@ -646,18 +656,19 @@ inputs = {
# Whether to enable custom endpoint for the Elasticsearch domain.
custom_endpoint_enabled = false
- # A map of custom tags to apply to the ElasticSearch Domain. The key is the tag
- # name and the value is the tag value.
+ # A map of custom tags to apply to the ElasticSearch Domain. The key is the
+ # tag name and the value is the tag value.
custom_tags = {}
- # The number of dedicated master nodes to run. We recommend setting this to 3 for
- # production deployments. Only used if var.dedicated_master_enabled is true.
+ # The number of dedicated master nodes to run. We recommend setting this to 3
+ # for production deployments. Only used if var.dedicated_master_enabled is
+ # true.
dedicated_master_count = null
- # Whether to deploy separate nodes specifically for performing cluster management
- # tasks (e.g. tracking number of nodes, monitoring health, replicating changes).
- # This increases the stability of large clusters and is required for clusters with
- # more than 10 nodes.
+ # Whether to deploy separate nodes specifically for performing cluster
+ # management tasks (e.g. tracking number of nodes, monitoring health,
+ # replicating changes). This increases the stability of large clusters and is
+ # required for clusters with more than 10 nodes.
dedicated_master_enabled = false
# The instance type for the dedicated master nodes. These nodes can use a
@@ -672,28 +683,29 @@ inputs = {
# The version of Elasticsearch to deploy.
elasticsearch_version = "7.7"
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arns.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arns.
enable_cloudwatch_alarms = true
- # False by default because encryption at rest is not included in the free tier.
- # When true, the Elasticsearch domain storage will be encrypted at rest using the
- # KMS key described with var.encryption_kms_key_id. We strongly recommend
- # configuring a custom KMS key instead of using the shared service key for a
- # better security posture when configuring encryption at rest.
+ # False by default because encryption at rest is not included in the free
+ # tier. When true, the Elasticsearch domain storage will be encrypted at rest
+ # using the KMS key described with var.encryption_kms_key_id. We strongly
+ # recommend configuring a custom KMS key instead of using the shared service
+ # key for a better security posture when configuring encryption at rest.
enable_encryption_at_rest = true
# Whether to enable node-to-node encryption.
enable_node_to_node_encryption = true
- # The ID of the KMS key to use to encrypt the Elasticsearch domain storage. Only
- # used if enable_encryption_at_rest. When null, uses the aws/es service KMS key.
+ # The ID of the KMS key to use to encrypt the Elasticsearch domain storage.
+ # Only used if enable_encryption_at_rest. When null, uses the aws/es service
+ # KMS key.
encryption_kms_key_id = null
# The ARNS of the IAM users and roles to which to allow full access to the
- # Elasticsearch cluster. Setting this to a restricted list is useful when using a
- # public access cluster.
+ # Elasticsearch cluster. Setting this to a restricted list is useful when
+ # using a public access cluster.
iam_principal_arns = ["*"]
# Whether the internal user database is enabled. Enable this to use master
@@ -701,43 +713,45 @@ inputs = {
internal_user_database_enabled = false
# The baseline input/output (I/O) performance of EBS volumes attached to data
- # nodes. Must be between 1000 and 4000. Applicable only if var.volume_type is io1.
+ # nodes. Must be between 1000 and 4000. Applicable only if var.volume_type is
+ # io1.
iops = null
# Whether the cluster is publicly accessible.
is_public = false
- # The maximum amount of time, in seconds, that KMSKeyError can be in red status
- # before triggering an alarm
+ # The maximum amount of time, in seconds, that KMSKeyError can be in red
+ # status before triggering an alarm
kms_key_error_period = 60
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
kms_key_error_treat_missing_data = "missing"
- # The maximum amount of time, in seconds, that KMSKeyInaccessible can be in red
- # status before triggering an alarm
+ # The maximum amount of time, in seconds, that KMSKeyInaccessible can be in
+ # red status before triggering an alarm
kms_key_inaccessible_period = 60
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
kms_key_inaccessible_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the master nodes' CPU utilization
+ # The period, in seconds, over which to measure the master nodes' CPU
+ # utilization
master_cpu_utilization_period = 900
# Trigger an alarm if the Elasticsearch cluster master nodes have a CPU
# utilization percentage above this threshold
master_cpu_utilization_threshold = 50
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
master_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the master nodes' JVM memory
@@ -748,10 +762,10 @@ inputs = {
# pressure percentage above this threshold
master_jvm_memory_pressure_threshold = 80
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
master_jvm_memory_pressure_treat_missing_data = "missing"
# ARN of the master user. Only used if advanced_security_options and
@@ -763,8 +777,8 @@ inputs = {
master_user_name = null
# Master account user password. Only used if advanced_security_options and
- # internal_user_database_enabled are set to true. WARNING: this password will be
- # stored in Terraform state.
+ # internal_user_database_enabled are set to true. WARNING: this password will
+ # be stored in Terraform state.
master_user_password = null # SENSITIVE
# Whether to monitor KMS key statistics
@@ -773,19 +787,20 @@ inputs = {
# Whether to monitor master node statistics
monitor_master_nodes = false
- # The period, in seconds, over which to measure the master nodes' CPU utilization
+ # The period, in seconds, over which to measure the master nodes' CPU
+ # utilization
node_count_period = 86400
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
node_count_treat_missing_data = "missing"
- # List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created in.
- # If var.zone_awareness_enabled is true, the first 2 or 3 provided subnet ids are
- # used, depending on var.availability_zone_count. Otherwise only the first one is
- # used.
+ # List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created
+ # in. If var.zone_awareness_enabled is true, the first 2 or 3 provided subnet
+ # ids are used, depending on var.availability_zone_count. Otherwise only the
+ # first one is used.
subnet_ids = []
# The name of the TLS security policy that needs to be applied to the HTTPS
@@ -794,14 +809,15 @@ inputs = {
# configured.
tls_security_policy = "Policy-Min-TLS-1-2-2019-07"
- # How long to wait for updates to the ES cluster before timing out and reporting
- # an error.
+ # How long to wait for updates to the ES cluster before timing out and
+ # reporting an error.
update_timeout = "90m"
# The id of the VPC to deploy into. It must be in the same region as the
# Elasticsearch domain and its tenancy must be set to Default. If
- # zone_awareness_enabled is false, the Elasticsearch cluster will have an endpoint
- # in one subnet of the VPC; otherwise it will have endpoints in two subnets.
+ # zone_awareness_enabled is false, the Elasticsearch cluster will have an
+ # endpoint in one subnet of the VPC; otherwise it will have endpoints in two
+ # subnets.
vpc_id = null
}
@@ -1533,11 +1549,11 @@ Domain-specific endpoint for Kibana without https scheme.
diff --git a/docs/reference/services/data-storage/amazon-rds.md b/docs/reference/services/data-storage/amazon-rds.md
index 2105f67df1..910f830d66 100644
--- a/docs/reference/services/data-storage/amazon-rds.md
+++ b/docs/reference/services/data-storage/amazon-rds.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon Relational Database Service
-View Source
+View Source
Release Notes
@@ -69,7 +69,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -77,12 +77,12 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
-* [How do I pass database configuration securely?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/data-stores/rds/core-concepts.md#how-do-i-pass-database-configuration-securely)
+* [How do I pass database configuration securely?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/data-stores/rds/core-concepts.md#how-do-i-pass-database-configuration-securely)
## Sample Usage
@@ -103,7 +103,7 @@ If you want to deploy this repo in production, check out the following resources
module "rds" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/rds?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/rds?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -116,8 +116,8 @@ module "rds" {
engine_version =
# The name used to namespace all the RDS resources created by these templates,
- # including the cluster and cluster instances (e.g. mysql-stage). Must be unique
- # in this region. Must be a lowercase string.
+ # including the cluster and cluster instances (e.g. mysql-stage). Must be
+ # unique in this region. Must be a lowercase string.
name =
# The list of IDs of the subnets in which to deploy RDS. The list must only
@@ -131,78 +131,79 @@ module "rds" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications. Also used for the alarms if the share
- # snapshot backup job fails.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications. Also used for the alarms if the
+ # share snapshot backup job fails.
alarms_sns_topic_arns = []
# The list of network CIDR blocks to allow network access to RDS from. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_cidr_blocks = []
# The list of IDs or Security Groups to allow network access to RDS from. All
- # security groups must either be in the VPC specified by var.vpc_id, or a peered
- # VPC with the VPC specified by var.vpc_id. One of
+ # security groups must either be in the VPC specified by var.vpc_id, or a
+ # peered VPC with the VPC specified by var.vpc_id. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_security_groups = []
# Indicates whether major version upgrades (e.g. 9.4.x to 9.5.x) will ever be
- # permitted. Note that these updates must always be manually performed and will
- # never be automatically applied.
+ # permitted. Note that these updates must always be manually performed and
+ # will never be automatically applied.
allow_major_version_upgrade = true
- # If true, both the CMK's Key Policy and IAM Policies (permissions) can be used to
- # grant permissions on the CMK. If false, only the CMK's Key Policy can be used to
- # grant permissions on the CMK. False is more secure (and generally preferred),
- # but true is more flexible and convenient.
+ # If true, both the CMK's Key Policy and IAM Policies (permissions) can be
+ # used to grant permissions on the CMK. If false, only the CMK's Key Policy
+ # can be used to grant permissions on the CMK. False is more secure (and
+ # generally preferred), but true is more flexible and convenient.
allow_manage_key_permissions_with_iam = false
- # Specifies whether any cluster modifications are applied immediately, or during
- # the next maintenance window. Note that cluster modifications may cause degraded
- # performance or downtime.
+ # Specifies whether any cluster modifications are applied immediately, or
+ # during the next maintenance window. Note that cluster modifications may
+ # cause degraded performance or downtime.
apply_immediately = false
# Indicates that minor engine upgrades will be applied automatically to the DB
# instance during the maintenance window. If set to true, you should set
- # var.engine_version to MAJOR.MINOR and omit the .PATCH at the end (e.g., use 5.7
- # and not 5.7.11); otherwise, you'll get Terraform state drift. See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_i
- # stance.html#engine_version for more details.
+ # var.engine_version to MAJOR.MINOR and omit the .PATCH at the end (e.g., use
+ # 5.7 and not 5.7.11); otherwise, you'll get Terraform state drift. See
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_instance.html#engine_version
+ # for more details.
auto_minor_version_upgrade = true
- # The name of the aws_db_security_group that is created. Defaults to var.name if
- # not specified.
+ # The name of the aws_db_security_group that is created. Defaults to var.name
+ # if not specified.
aws_db_security_group_name = null
- # How often, in seconds, the backup job is expected to run. This is the same as
- # var.schedule_expression, but unfortunately, Terraform offers no way to convert
- # rate expressions to seconds. We add a CloudWatch alarm that triggers if the
- # metric in var.create_snapshot_cloudwatch_metric_namespace isn't updated within
- # this time period, as that indicates the backup failed to run.
+ # How often, in seconds, the backup job is expected to run. This is the same
+ # as var.schedule_expression, but unfortunately, Terraform offers no way to
+ # convert rate expressions to seconds. We add a CloudWatch alarm that triggers
+ # if the metric in var.create_snapshot_cloudwatch_metric_namespace isn't
+ # updated within this time period, as that indicates the backup failed to run.
backup_job_alarm_period = 3600
# Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
# state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
backup_job_alarm_treat_missing_data = "missing"
- # How many days to keep backup snapshots around before cleaning them up. Must be 1
- # or greater to support read replicas.
+ # How many days to keep backup snapshots around before cleaning them up. Must
+ # be 1 or greater to support read replicas.
backup_retention_period = 30
# The daily time range during which automated backups are created (e.g.
- # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup runs.
+ # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup
+ # runs.
backup_window = "06:00-07:00"
- # A list of IAM ARNs for users who should be given administrator access to this
- # CMK (e.g. arn:aws:iam:::user/). If this list is
- # empty, and var.kms_key_arn is null, the ARN of the current user will be used.
+ # A list of IAM ARNs for users who should be given administrator access to
+ # this CMK (e.g. arn:aws:iam:::user/). If this
+ # list is empty, and var.kms_key_arn is null, the ARN of the current user will
+ # be used.
cmk_administrator_iam_arns = []
# A list of IAM ARNs for users from external AWS accounts who should be given
@@ -211,7 +212,8 @@ module "rds" {
# A list of IAM ARNs for users who should be given permissions to use this CMK
# (e.g. arn:aws:iam:::user/). If this list is
- # empty, and var.kms_key_arn is null, the ARN of the current user will be used.
+ # empty, and var.kms_key_arn is null, the ARN of the current user will be
+ # used.
cmk_user_iam_arns = []
# Copy all the RDS instance tags to snapshots. Default is false.
@@ -223,13 +225,14 @@ module "rds" {
# cmk_external_user_iam_arns, allow_manage_key_permissions.
create_custom_kms_key = false
- # Set to true if you want a DNS record automatically created and pointed at the
- # RDS endpoints.
+ # Set to true if you want a DNS record automatically created and pointed at
+ # the RDS endpoints.
create_route53_entry = false
- # The namespace to use for the CloudWatch metric we report every time a new RDS
- # snapshot is created. We add a CloudWatch alarm on this metric to notify us if
- # the backup job fails to run for any reason. Defaults to the cluster name.
+ # The namespace to use for the CloudWatch metric we report every time a new
+ # RDS snapshot is created. We add a CloudWatch alarm on this metric to notify
+ # us if the backup job fails to run for any reason. Defaults to the cluster
+ # name.
create_snapshot_cloudwatch_metric_namespace = null
# Configure a custom parameter group for the RDS DB. This will create a new
@@ -237,19 +240,20 @@ module "rds" {
# launched with the default parameter group.
custom_parameter_group = null
- # A map of custom tags to apply to the RDS Instance and the Security Group created
- # for it. The key is the tag name and the value is the tag value.
+ # A map of custom tags to apply to the RDS Instance and the Security Group
+ # created for it. The key is the tag name and the value is the tag value.
custom_tags = {}
- # Parameters for the cpu usage widget to output for use in a CloudWatch dashboard.
+ # Parameters for the cpu usage widget to output for use in a CloudWatch
+ # dashboard.
dashboard_cpu_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the database connections widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the database connections widget to output for use in a
+ # CloudWatch dashboard.
dashboard_db_connections_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the available disk space widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the available disk space widget to output for use in a
+ # CloudWatch dashboard.
dashboard_disk_space_widget_parameters = {"height":6,"period":60,"width":8}
# Parameters for the available memory widget to output for use in a CloudWatch
@@ -264,46 +268,45 @@ module "rds" {
# dashboard.
dashboard_write_latency_widget_parameters = {"height":6,"period":60,"width":8}
- # The friendly name or ARN of an AWS Secrets Manager secret that contains database
- # configuration information in the format outlined by this document:
+ # The friendly name or ARN of an AWS Secrets Manager secret that contains
+ # database configuration information in the format outlined by this document:
# https://docs.aws.amazon.com/secretsmanager/latest/userguide/best-practices.html.
- # The engine, username, password, dbname, and port fields must be included in the
- # JSON. Note that even with this precaution, this information will be stored in
- # plaintext in the Terraform state file! See the following blog post for more
- # details:
- # https://blog.gruntwork.io/a-comprehensive-guide-to-managing-secrets-in-your-terr
- # form-code-1d586955ace1. If you do not wish to use Secrets Manager, leave this as
- # null, and use the master_username, master_password, db_name, engine, and port
- # variables.
+ # The engine, username, password, dbname, and port fields must be included in
+ # the JSON. Note that even with this precaution, this information will be
+ # stored in plaintext in the Terraform state file! See the following blog post
+ # for more details:
+ # https://blog.gruntwork.io/a-comprehensive-guide-to-managing-secrets-in-your-terraform-code-1d586955ace1.
+ # If you do not wish to use Secrets Manager, leave this as null, and use the
+ # master_username, master_password, db_name, engine, and port variables.
db_config_secrets_manager_id = null
- # The name for your database of up to 8 alpha-numeric characters. If you do not
- # provide a name, Amazon RDS will not create an empty database on the RDS
- # instance. This can also be provided via AWS Secrets Manager. See the description
- # of db_config_secrets_manager_id.
+ # The name for your database of up to 8 alpha-numeric characters. If you do
+ # not provide a name, Amazon RDS will not create an empty database on the RDS
+ # instance. This can also be provided via AWS Secrets Manager. See the
+ # description of db_config_secrets_manager_id.
db_name = null
- # Specifies whether to remove automated backups immediately after the DB instance
- # is deleted
+ # Specifies whether to remove automated backups immediately after the DB
+ # instance is deleted
delete_automated_backups = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
- # When true, enable CloudWatch metrics for the manual snapshots created for the
- # purpose of sharing with another account.
+ # When true, enable CloudWatch metrics for the manual snapshots created for
+ # the purpose of sharing with another account.
enable_cloudwatch_metrics = true
- # Enable deletion protection on the RDS instance. If this is enabled, the database
- # cannot be deleted prior to disabling
+ # Enable deletion protection on the RDS instance. If this is enabled, the
+ # database cannot be deleted prior to disabling
enable_deletion_protection = false
# Set to true to enable alarms related to performance, such as read and write
# latency alarms. Set to false to disable those alarms if you aren't sure what
- # would be reasonable perf numbers for your RDS set up or if those numbers are too
- # unpredictable.
+ # would be reasonable perf numbers for your RDS set up or if those numbers are
+ # too unpredictable.
enable_perf_alarms = true
# When true, enable CloudWatch alarms for the manual snapshots created for the
@@ -312,63 +315,64 @@ module "rds" {
enable_share_snapshot_cloudwatch_alarms = true
# List of log types to enable for exporting to CloudWatch logs. If omitted, no
- # logs will be exported. Valid values (depending on engine): alert, audit, error,
- # general, listener, slowquery, trace, postgresql (PostgreSQL) and upgrade
- # (PostgreSQL).
+ # logs will be exported. Valid values (depending on engine): alert, audit,
+ # error, general, listener, slowquery, trace, postgresql (PostgreSQL) and
+ # upgrade (PostgreSQL).
enabled_cloudwatch_logs_exports = []
# The DB engine to use (e.g. mysql). This can also be provided via AWS Secrets
# Manager. See the description of db_config_secrets_manager_id.
engine = null
- # The period, in seconds, over which to measure the CPU utilization percentage.
+ # The period, in seconds, over which to measure the CPU utilization
+ # percentage.
high_cpu_utilization_period = 60
- # Trigger an alarm if the DB instance has a CPU utilization percentage above this
- # threshold.
+ # Trigger an alarm if the DB instance has a CPU utilization percentage above
+ # this threshold.
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the read latency.
high_read_latency_period = 60
- # Trigger an alarm if the DB instance read latency (average amount of time taken
- # per disk I/O operation), in seconds, is above this threshold.
+ # Trigger an alarm if the DB instance read latency (average amount of time
+ # taken per disk I/O operation), in seconds, is above this threshold.
high_read_latency_threshold = 5
# The period, in seconds, over which to measure the write latency.
high_write_latency_period = 60
- # Trigger an alarm if the DB instance write latency (average amount of time taken
- # per disk I/O operation), in seconds, is above this threshold.
+ # Trigger an alarm if the DB instance write latency (average amount of time
+ # taken per disk I/O operation), in seconds, is above this threshold.
high_write_latency_threshold = 5
- # The ID of the Route 53 hosted zone into which the Route 53 DNS record should be
- # written
+ # The ID of the Route 53 hosted zone into which the Route 53 DNS record should
+ # be written
hosted_zone_id = null
- # Specifies whether mappings of AWS Identity and Access Management (IAM) accounts
- # to database accounts is enabled. Disabled by default.
+ # Specifies whether mappings of AWS Identity and Access Management (IAM)
+ # accounts to database accounts is enabled. Disabled by default.
iam_database_authentication_enabled = false
# The instance type to use for the db (e.g. db.t3.micro)
instance_type = "db.t3.micro"
- # The amount of provisioned IOPS for the primary instance. Setting this implies a
- # storage_type of 'io1'. Can only be set when storage_type is 'gp3' or 'io1'. Set
- # to 0 to disable.
+ # The amount of provisioned IOPS for the primary instance. Setting this
+ # implies a storage_type of 'io1'. Can only be set when storage_type is 'gp3'
+ # or 'io1'. Set to 0 to disable.
iops = 0
- # The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK) that
- # will be used to encrypt/decrypt backup files. If you leave this blank, the
- # default RDS KMS key for the account will be used. If you set
- # var.create_custom_kms_key to true, this value will be ignored and a custom key
- # will be created and used instead.
+ # The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK)
+ # that will be used to encrypt/decrypt backup files. If you leave this blank,
+ # the default RDS KMS key for the account will be used. If you set
+ # var.create_custom_kms_key to true, this value will be ignored and a custom
+ # key will be created and used instead.
kms_key_arn = null
# The license model to use for this DB. Check the docs for your RDS DB for
@@ -378,14 +382,14 @@ module "rds" {
# The period, in seconds, over which to measure the available free disk space.
low_disk_space_available_period = 60
- # Trigger an alarm if the amount of disk space, in Bytes, on the DB instance drops
- # below this threshold.
+ # Trigger an alarm if the amount of disk space, in Bytes, on the DB instance
+ # drops below this threshold.
low_disk_space_available_threshold = 1000000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
low_disk_space_available_treat_missing_data = "missing"
# The period, in seconds, over which to measure the available free memory.
@@ -395,15 +399,15 @@ module "rds" {
# drops below this threshold.
low_memory_available_threshold = 100000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
low_memory_available_treat_missing_data = "missing"
- # The weekly day and time range during which system maintenance can occur (e.g.
- # wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or there may
- # even be a downtime during maintenance windows.
+ # The weekly day and time range during which system maintenance can occur
+ # (e.g. wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or
+ # there may even be a downtime during maintenance windows.
maintenance_window = "sun:07:00-sun:08:00"
# The value to use for the master password of the database. This can also be
@@ -416,35 +420,35 @@ module "rds" {
# db_config_secrets_manager_id.
master_username = null
- # When configured, the upper limit to which Amazon RDS can automatically scale the
- # storage of the DB instance. Configuring this will automatically ignore
+ # When configured, the upper limit to which Amazon RDS can automatically scale
+ # the storage of the DB instance. Configuring this will automatically ignore
# differences to allocated_storage. Must be greater than or equal to
# allocated_storage or 0 to disable Storage Autoscaling.
max_allocated_storage = 0
- # The interval, in seconds, between points when Enhanced Monitoring metrics are
- # collected for the DB instance. To disable collecting Enhanced Monitoring
- # metrics, specify 0. Valid Values: 0, 1, 5, 10, 15, 30, 60. Enhanced Monitoring
- # metrics are useful when you want to see how different processes or threads on a
- # DB instance use the CPU.
+ # The interval, in seconds, between points when Enhanced Monitoring metrics
+ # are collected for the DB instance. To disable collecting Enhanced Monitoring
+ # metrics, specify 0. Valid Values: 0, 1, 5, 10, 15, 30, 60. Enhanced
+ # Monitoring metrics are useful when you want to see how different processes
+ # or threads on a DB instance use the CPU.
monitoring_interval = 0
- # The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to
- # CloudWatch Logs. If monitoring_interval is greater than 0, but
- # monitoring_role_arn is left as an empty string, a default IAM role that allows
- # enhanced monitoring will be created.
+ # The ARN for the IAM role that permits RDS to send enhanced monitoring
+ # metrics to CloudWatch Logs. If monitoring_interval is greater than 0, but
+ # monitoring_role_arn is left as an empty string, a default IAM role that
+ # allows enhanced monitoring will be created.
monitoring_role_arn = null
- # Optionally add a path to the IAM monitoring role. If left blank, it will default
- # to just /.
+ # Optionally add a path to the IAM monitoring role. If left blank, it will
+ # default to just /.
monitoring_role_arn_path = "/"
# The name of the enhanced_monitoring_role that is created. Defaults to
# var.name-monitoring-role if not specified.
monitoring_role_name = null
- # Specifies if a standby instance should be deployed in another availability zone.
- # If the primary fails, this instance will automatically take over.
+ # Specifies if a standby instance should be deployed in another availability
+ # zone. If the primary fails, this instance will automatically take over.
multi_az = false
# The number of read replicas to deploy
@@ -453,80 +457,84 @@ module "rds" {
# Name of a DB option group to associate.
option_group_name = null
- # Specifies whether Performance Insights are enabled. Performance Insights can be
- # enabled for specific versions of database engines. See
+ # Specifies whether Performance Insights are enabled. Performance Insights can
+ # be enabled for specific versions of database engines. See
# https://aws.amazon.com/rds/performance-insights/ for more details.
performance_insights_enabled = false
- # The port the DB will listen on (e.g. 3306). Alternatively, this can be provided
- # via AWS Secrets Manager. See the description of db_config_secrets_manager_id.
+ # The port the DB will listen on (e.g. 3306). Alternatively, this can be
+ # provided via AWS Secrets Manager. See the description of
+ # db_config_secrets_manager_id.
port = null
- # The domain name to create a route 53 record for the primary endpoint of the RDS
- # database.
+ # The domain name to create a route 53 record for the primary endpoint of the
+ # RDS database.
primary_domain_name = null
- # If you wish to make your database accessible from the public Internet, set this
- # flag to true (WARNING: NOT RECOMMENDED FOR REGULAR USAGE!!). The default is
- # false, which means the database is only accessible from within the VPC, which is
- # much more secure. This flag MUST be false for serverless mode.
+ # If you wish to make your database accessible from the public Internet, set
+ # this flag to true (WARNING: NOT RECOMMENDED FOR REGULAR USAGE!!). The
+ # default is false, which means the database is only accessible from within
+ # the VPC, which is much more secure. This flag MUST be false for serverless
+ # mode.
publicly_accessible = false
# How many days to keep backup snapshots around before cleaning them up on the
- # read replicas. Must be 1 or greater to support read replicas. 0 means disable
- # automated backups.
+ # read replicas. Must be 1 or greater to support read replicas. 0 means
+ # disable automated backups.
replica_backup_retention_period = 0
# The domain name to create a route 53 record for the read replicas of the RDS
# database.
replica_domain_name = null
- # The maximum number of snapshots to keep around for the purpose of cross account
- # sharing. Once this number is exceeded, a lambda function will delete the oldest
- # snapshots. Only used if var.share_snapshot_with_another_account is true.
+ # The maximum number of snapshots to keep around for the purpose of cross
+ # account sharing. Once this number is exceeded, a lambda function will delete
+ # the oldest snapshots. Only used if var.share_snapshot_with_another_account
+ # is true.
share_snapshot_max_snapshots = 30
# An expression that defines how often to run the lambda function to take
- # snapshots for the purpose of cross account sharing. For example, cron(0 20 * * ?
- # *) or rate(5 minutes). Required if var.share_snapshot_with_another_account is
- # true
+ # snapshots for the purpose of cross account sharing. For example, cron(0 20 *
+ # * ? *) or rate(5 minutes). Required if
+ # var.share_snapshot_with_another_account is true
share_snapshot_schedule_expression = null
- # The ID of the AWS Account that the snapshot should be shared with. Required if
- # var.share_snapshot_with_another_account is true.
+ # The ID of the AWS Account that the snapshot should be shared with. Required
+ # if var.share_snapshot_with_another_account is true.
share_snapshot_with_account_id = null
- # If set to true, take periodic snapshots of the RDS DB that should be shared with
- # another account.
+ # If set to true, take periodic snapshots of the RDS DB that should be shared
+ # with another account.
share_snapshot_with_another_account = false
# Determines whether a final DB snapshot is created before the DB instance is
- # deleted. Be very careful setting this to true; if you do, and you delete this DB
- # instance, you will not have any backups of the data! You almost never want to
- # set this to true, unless you are doing automated or manual testing.
+ # deleted. Be very careful setting this to true; if you do, and you delete
+ # this DB instance, you will not have any backups of the data! You almost
+ # never want to set this to true, unless you are doing automated or manual
+ # testing.
skip_final_snapshot = false
- # If non-null, the RDS Instance will be restored from the given Snapshot ID. This
- # is the Snapshot ID you'd find in the RDS console, e.g:
+ # If non-null, the RDS Instance will be restored from the given Snapshot ID.
+ # This is the Snapshot ID you'd find in the RDS console, e.g:
# rds:production-2015-06-26-06-05.
snapshot_identifier = null
# Specifies whether the DB instance is encrypted.
storage_encrypted = true
- # The type of storage to use for the primary instance. Must be one of 'standard'
- # (magnetic), 'gp2' (general purpose SSD), 'gp3' (general purpose SSD that needs
- # iops independently), or 'io1' (provisioned IOPS SSD).
+ # The type of storage to use for the primary instance. Must be one of
+ # 'standard' (magnetic), 'gp2' (general purpose SSD), 'gp3' (general purpose
+ # SSD that needs iops independently), or 'io1' (provisioned IOPS SSD).
storage_type = "gp2"
- # Trigger an alarm if the number of connections to the DB instance goes above this
- # threshold.
+ # Trigger an alarm if the number of connections to the DB instance goes above
+ # this threshold.
too_many_db_connections_threshold = null
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
too_many_db_connections_treat_missing_data = "missing"
}
@@ -549,7 +557,7 @@ module "rds" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/rds?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/rds?ref=v0.104.12"
}
inputs = {
@@ -565,8 +573,8 @@ inputs = {
engine_version =
# The name used to namespace all the RDS resources created by these templates,
- # including the cluster and cluster instances (e.g. mysql-stage). Must be unique
- # in this region. Must be a lowercase string.
+ # including the cluster and cluster instances (e.g. mysql-stage). Must be
+ # unique in this region. Must be a lowercase string.
name =
# The list of IDs of the subnets in which to deploy RDS. The list must only
@@ -580,78 +588,79 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications. Also used for the alarms if the share
- # snapshot backup job fails.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications. Also used for the alarms if the
+ # share snapshot backup job fails.
alarms_sns_topic_arns = []
# The list of network CIDR blocks to allow network access to RDS from. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_cidr_blocks = []
# The list of IDs or Security Groups to allow network access to RDS from. All
- # security groups must either be in the VPC specified by var.vpc_id, or a peered
- # VPC with the VPC specified by var.vpc_id. One of
+ # security groups must either be in the VPC specified by var.vpc_id, or a
+ # peered VPC with the VPC specified by var.vpc_id. One of
# var.allow_connections_from_cidr_blocks or
- # var.allow_connections_from_security_groups must be specified for the database to
- # be reachable.
+ # var.allow_connections_from_security_groups must be specified for the
+ # database to be reachable.
allow_connections_from_security_groups = []
# Indicates whether major version upgrades (e.g. 9.4.x to 9.5.x) will ever be
- # permitted. Note that these updates must always be manually performed and will
- # never be automatically applied.
+ # permitted. Note that these updates must always be manually performed and
+ # will never be automatically applied.
allow_major_version_upgrade = true
- # If true, both the CMK's Key Policy and IAM Policies (permissions) can be used to
- # grant permissions on the CMK. If false, only the CMK's Key Policy can be used to
- # grant permissions on the CMK. False is more secure (and generally preferred),
- # but true is more flexible and convenient.
+ # If true, both the CMK's Key Policy and IAM Policies (permissions) can be
+ # used to grant permissions on the CMK. If false, only the CMK's Key Policy
+ # can be used to grant permissions on the CMK. False is more secure (and
+ # generally preferred), but true is more flexible and convenient.
allow_manage_key_permissions_with_iam = false
- # Specifies whether any cluster modifications are applied immediately, or during
- # the next maintenance window. Note that cluster modifications may cause degraded
- # performance or downtime.
+ # Specifies whether any cluster modifications are applied immediately, or
+ # during the next maintenance window. Note that cluster modifications may
+ # cause degraded performance or downtime.
apply_immediately = false
# Indicates that minor engine upgrades will be applied automatically to the DB
# instance during the maintenance window. If set to true, you should set
- # var.engine_version to MAJOR.MINOR and omit the .PATCH at the end (e.g., use 5.7
- # and not 5.7.11); otherwise, you'll get Terraform state drift. See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_i
- # stance.html#engine_version for more details.
+ # var.engine_version to MAJOR.MINOR and omit the .PATCH at the end (e.g., use
+ # 5.7 and not 5.7.11); otherwise, you'll get Terraform state drift. See
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_instance.html#engine_version
+ # for more details.
auto_minor_version_upgrade = true
- # The name of the aws_db_security_group that is created. Defaults to var.name if
- # not specified.
+ # The name of the aws_db_security_group that is created. Defaults to var.name
+ # if not specified.
aws_db_security_group_name = null
- # How often, in seconds, the backup job is expected to run. This is the same as
- # var.schedule_expression, but unfortunately, Terraform offers no way to convert
- # rate expressions to seconds. We add a CloudWatch alarm that triggers if the
- # metric in var.create_snapshot_cloudwatch_metric_namespace isn't updated within
- # this time period, as that indicates the backup failed to run.
+ # How often, in seconds, the backup job is expected to run. This is the same
+ # as var.schedule_expression, but unfortunately, Terraform offers no way to
+ # convert rate expressions to seconds. We add a CloudWatch alarm that triggers
+ # if the metric in var.create_snapshot_cloudwatch_metric_namespace isn't
+ # updated within this time period, as that indicates the backup failed to run.
backup_job_alarm_period = 3600
# Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
# state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
backup_job_alarm_treat_missing_data = "missing"
- # How many days to keep backup snapshots around before cleaning them up. Must be 1
- # or greater to support read replicas.
+ # How many days to keep backup snapshots around before cleaning them up. Must
+ # be 1 or greater to support read replicas.
backup_retention_period = 30
# The daily time range during which automated backups are created (e.g.
- # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup runs.
+ # 04:00-09:00). Time zone is UTC. Performance may be degraded while a backup
+ # runs.
backup_window = "06:00-07:00"
- # A list of IAM ARNs for users who should be given administrator access to this
- # CMK (e.g. arn:aws:iam:::user/). If this list is
- # empty, and var.kms_key_arn is null, the ARN of the current user will be used.
+ # A list of IAM ARNs for users who should be given administrator access to
+ # this CMK (e.g. arn:aws:iam:::user/). If this
+ # list is empty, and var.kms_key_arn is null, the ARN of the current user will
+ # be used.
cmk_administrator_iam_arns = []
# A list of IAM ARNs for users from external AWS accounts who should be given
@@ -660,7 +669,8 @@ inputs = {
# A list of IAM ARNs for users who should be given permissions to use this CMK
# (e.g. arn:aws:iam:::user/). If this list is
- # empty, and var.kms_key_arn is null, the ARN of the current user will be used.
+ # empty, and var.kms_key_arn is null, the ARN of the current user will be
+ # used.
cmk_user_iam_arns = []
# Copy all the RDS instance tags to snapshots. Default is false.
@@ -672,13 +682,14 @@ inputs = {
# cmk_external_user_iam_arns, allow_manage_key_permissions.
create_custom_kms_key = false
- # Set to true if you want a DNS record automatically created and pointed at the
- # RDS endpoints.
+ # Set to true if you want a DNS record automatically created and pointed at
+ # the RDS endpoints.
create_route53_entry = false
- # The namespace to use for the CloudWatch metric we report every time a new RDS
- # snapshot is created. We add a CloudWatch alarm on this metric to notify us if
- # the backup job fails to run for any reason. Defaults to the cluster name.
+ # The namespace to use for the CloudWatch metric we report every time a new
+ # RDS snapshot is created. We add a CloudWatch alarm on this metric to notify
+ # us if the backup job fails to run for any reason. Defaults to the cluster
+ # name.
create_snapshot_cloudwatch_metric_namespace = null
# Configure a custom parameter group for the RDS DB. This will create a new
@@ -686,19 +697,20 @@ inputs = {
# launched with the default parameter group.
custom_parameter_group = null
- # A map of custom tags to apply to the RDS Instance and the Security Group created
- # for it. The key is the tag name and the value is the tag value.
+ # A map of custom tags to apply to the RDS Instance and the Security Group
+ # created for it. The key is the tag name and the value is the tag value.
custom_tags = {}
- # Parameters for the cpu usage widget to output for use in a CloudWatch dashboard.
+ # Parameters for the cpu usage widget to output for use in a CloudWatch
+ # dashboard.
dashboard_cpu_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the database connections widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the database connections widget to output for use in a
+ # CloudWatch dashboard.
dashboard_db_connections_widget_parameters = {"height":6,"period":60,"width":8}
- # Parameters for the available disk space widget to output for use in a CloudWatch
- # dashboard.
+ # Parameters for the available disk space widget to output for use in a
+ # CloudWatch dashboard.
dashboard_disk_space_widget_parameters = {"height":6,"period":60,"width":8}
# Parameters for the available memory widget to output for use in a CloudWatch
@@ -713,46 +725,45 @@ inputs = {
# dashboard.
dashboard_write_latency_widget_parameters = {"height":6,"period":60,"width":8}
- # The friendly name or ARN of an AWS Secrets Manager secret that contains database
- # configuration information in the format outlined by this document:
+ # The friendly name or ARN of an AWS Secrets Manager secret that contains
+ # database configuration information in the format outlined by this document:
# https://docs.aws.amazon.com/secretsmanager/latest/userguide/best-practices.html.
- # The engine, username, password, dbname, and port fields must be included in the
- # JSON. Note that even with this precaution, this information will be stored in
- # plaintext in the Terraform state file! See the following blog post for more
- # details:
- # https://blog.gruntwork.io/a-comprehensive-guide-to-managing-secrets-in-your-terr
- # form-code-1d586955ace1. If you do not wish to use Secrets Manager, leave this as
- # null, and use the master_username, master_password, db_name, engine, and port
- # variables.
+ # The engine, username, password, dbname, and port fields must be included in
+ # the JSON. Note that even with this precaution, this information will be
+ # stored in plaintext in the Terraform state file! See the following blog post
+ # for more details:
+ # https://blog.gruntwork.io/a-comprehensive-guide-to-managing-secrets-in-your-terraform-code-1d586955ace1.
+ # If you do not wish to use Secrets Manager, leave this as null, and use the
+ # master_username, master_password, db_name, engine, and port variables.
db_config_secrets_manager_id = null
- # The name for your database of up to 8 alpha-numeric characters. If you do not
- # provide a name, Amazon RDS will not create an empty database on the RDS
- # instance. This can also be provided via AWS Secrets Manager. See the description
- # of db_config_secrets_manager_id.
+ # The name for your database of up to 8 alpha-numeric characters. If you do
+ # not provide a name, Amazon RDS will not create an empty database on the RDS
+ # instance. This can also be provided via AWS Secrets Manager. See the
+ # description of db_config_secrets_manager_id.
db_name = null
- # Specifies whether to remove automated backups immediately after the DB instance
- # is deleted
+ # Specifies whether to remove automated backups immediately after the DB
+ # instance is deleted
delete_automated_backups = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
- # When true, enable CloudWatch metrics for the manual snapshots created for the
- # purpose of sharing with another account.
+ # When true, enable CloudWatch metrics for the manual snapshots created for
+ # the purpose of sharing with another account.
enable_cloudwatch_metrics = true
- # Enable deletion protection on the RDS instance. If this is enabled, the database
- # cannot be deleted prior to disabling
+ # Enable deletion protection on the RDS instance. If this is enabled, the
+ # database cannot be deleted prior to disabling
enable_deletion_protection = false
# Set to true to enable alarms related to performance, such as read and write
# latency alarms. Set to false to disable those alarms if you aren't sure what
- # would be reasonable perf numbers for your RDS set up or if those numbers are too
- # unpredictable.
+ # would be reasonable perf numbers for your RDS set up or if those numbers are
+ # too unpredictable.
enable_perf_alarms = true
# When true, enable CloudWatch alarms for the manual snapshots created for the
@@ -761,63 +772,64 @@ inputs = {
enable_share_snapshot_cloudwatch_alarms = true
# List of log types to enable for exporting to CloudWatch logs. If omitted, no
- # logs will be exported. Valid values (depending on engine): alert, audit, error,
- # general, listener, slowquery, trace, postgresql (PostgreSQL) and upgrade
- # (PostgreSQL).
+ # logs will be exported. Valid values (depending on engine): alert, audit,
+ # error, general, listener, slowquery, trace, postgresql (PostgreSQL) and
+ # upgrade (PostgreSQL).
enabled_cloudwatch_logs_exports = []
# The DB engine to use (e.g. mysql). This can also be provided via AWS Secrets
# Manager. See the description of db_config_secrets_manager_id.
engine = null
- # The period, in seconds, over which to measure the CPU utilization percentage.
+ # The period, in seconds, over which to measure the CPU utilization
+ # percentage.
high_cpu_utilization_period = 60
- # Trigger an alarm if the DB instance has a CPU utilization percentage above this
- # threshold.
+ # Trigger an alarm if the DB instance has a CPU utilization percentage above
+ # this threshold.
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the read latency.
high_read_latency_period = 60
- # Trigger an alarm if the DB instance read latency (average amount of time taken
- # per disk I/O operation), in seconds, is above this threshold.
+ # Trigger an alarm if the DB instance read latency (average amount of time
+ # taken per disk I/O operation), in seconds, is above this threshold.
high_read_latency_threshold = 5
# The period, in seconds, over which to measure the write latency.
high_write_latency_period = 60
- # Trigger an alarm if the DB instance write latency (average amount of time taken
- # per disk I/O operation), in seconds, is above this threshold.
+ # Trigger an alarm if the DB instance write latency (average amount of time
+ # taken per disk I/O operation), in seconds, is above this threshold.
high_write_latency_threshold = 5
- # The ID of the Route 53 hosted zone into which the Route 53 DNS record should be
- # written
+ # The ID of the Route 53 hosted zone into which the Route 53 DNS record should
+ # be written
hosted_zone_id = null
- # Specifies whether mappings of AWS Identity and Access Management (IAM) accounts
- # to database accounts is enabled. Disabled by default.
+ # Specifies whether mappings of AWS Identity and Access Management (IAM)
+ # accounts to database accounts is enabled. Disabled by default.
iam_database_authentication_enabled = false
# The instance type to use for the db (e.g. db.t3.micro)
instance_type = "db.t3.micro"
- # The amount of provisioned IOPS for the primary instance. Setting this implies a
- # storage_type of 'io1'. Can only be set when storage_type is 'gp3' or 'io1'. Set
- # to 0 to disable.
+ # The amount of provisioned IOPS for the primary instance. Setting this
+ # implies a storage_type of 'io1'. Can only be set when storage_type is 'gp3'
+ # or 'io1'. Set to 0 to disable.
iops = 0
- # The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK) that
- # will be used to encrypt/decrypt backup files. If you leave this blank, the
- # default RDS KMS key for the account will be used. If you set
- # var.create_custom_kms_key to true, this value will be ignored and a custom key
- # will be created and used instead.
+ # The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK)
+ # that will be used to encrypt/decrypt backup files. If you leave this blank,
+ # the default RDS KMS key for the account will be used. If you set
+ # var.create_custom_kms_key to true, this value will be ignored and a custom
+ # key will be created and used instead.
kms_key_arn = null
# The license model to use for this DB. Check the docs for your RDS DB for
@@ -827,14 +839,14 @@ inputs = {
# The period, in seconds, over which to measure the available free disk space.
low_disk_space_available_period = 60
- # Trigger an alarm if the amount of disk space, in Bytes, on the DB instance drops
- # below this threshold.
+ # Trigger an alarm if the amount of disk space, in Bytes, on the DB instance
+ # drops below this threshold.
low_disk_space_available_threshold = 1000000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
low_disk_space_available_treat_missing_data = "missing"
# The period, in seconds, over which to measure the available free memory.
@@ -844,15 +856,15 @@ inputs = {
# drops below this threshold.
low_memory_available_threshold = 100000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
low_memory_available_treat_missing_data = "missing"
- # The weekly day and time range during which system maintenance can occur (e.g.
- # wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or there may
- # even be a downtime during maintenance windows.
+ # The weekly day and time range during which system maintenance can occur
+ # (e.g. wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or
+ # there may even be a downtime during maintenance windows.
maintenance_window = "sun:07:00-sun:08:00"
# The value to use for the master password of the database. This can also be
@@ -865,35 +877,35 @@ inputs = {
# db_config_secrets_manager_id.
master_username = null
- # When configured, the upper limit to which Amazon RDS can automatically scale the
- # storage of the DB instance. Configuring this will automatically ignore
+ # When configured, the upper limit to which Amazon RDS can automatically scale
+ # the storage of the DB instance. Configuring this will automatically ignore
# differences to allocated_storage. Must be greater than or equal to
# allocated_storage or 0 to disable Storage Autoscaling.
max_allocated_storage = 0
- # The interval, in seconds, between points when Enhanced Monitoring metrics are
- # collected for the DB instance. To disable collecting Enhanced Monitoring
- # metrics, specify 0. Valid Values: 0, 1, 5, 10, 15, 30, 60. Enhanced Monitoring
- # metrics are useful when you want to see how different processes or threads on a
- # DB instance use the CPU.
+ # The interval, in seconds, between points when Enhanced Monitoring metrics
+ # are collected for the DB instance. To disable collecting Enhanced Monitoring
+ # metrics, specify 0. Valid Values: 0, 1, 5, 10, 15, 30, 60. Enhanced
+ # Monitoring metrics are useful when you want to see how different processes
+ # or threads on a DB instance use the CPU.
monitoring_interval = 0
- # The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to
- # CloudWatch Logs. If monitoring_interval is greater than 0, but
- # monitoring_role_arn is left as an empty string, a default IAM role that allows
- # enhanced monitoring will be created.
+ # The ARN for the IAM role that permits RDS to send enhanced monitoring
+ # metrics to CloudWatch Logs. If monitoring_interval is greater than 0, but
+ # monitoring_role_arn is left as an empty string, a default IAM role that
+ # allows enhanced monitoring will be created.
monitoring_role_arn = null
- # Optionally add a path to the IAM monitoring role. If left blank, it will default
- # to just /.
+ # Optionally add a path to the IAM monitoring role. If left blank, it will
+ # default to just /.
monitoring_role_arn_path = "/"
# The name of the enhanced_monitoring_role that is created. Defaults to
# var.name-monitoring-role if not specified.
monitoring_role_name = null
- # Specifies if a standby instance should be deployed in another availability zone.
- # If the primary fails, this instance will automatically take over.
+ # Specifies if a standby instance should be deployed in another availability
+ # zone. If the primary fails, this instance will automatically take over.
multi_az = false
# The number of read replicas to deploy
@@ -902,80 +914,84 @@ inputs = {
# Name of a DB option group to associate.
option_group_name = null
- # Specifies whether Performance Insights are enabled. Performance Insights can be
- # enabled for specific versions of database engines. See
+ # Specifies whether Performance Insights are enabled. Performance Insights can
+ # be enabled for specific versions of database engines. See
# https://aws.amazon.com/rds/performance-insights/ for more details.
performance_insights_enabled = false
- # The port the DB will listen on (e.g. 3306). Alternatively, this can be provided
- # via AWS Secrets Manager. See the description of db_config_secrets_manager_id.
+ # The port the DB will listen on (e.g. 3306). Alternatively, this can be
+ # provided via AWS Secrets Manager. See the description of
+ # db_config_secrets_manager_id.
port = null
- # The domain name to create a route 53 record for the primary endpoint of the RDS
- # database.
+ # The domain name to create a route 53 record for the primary endpoint of the
+ # RDS database.
primary_domain_name = null
- # If you wish to make your database accessible from the public Internet, set this
- # flag to true (WARNING: NOT RECOMMENDED FOR REGULAR USAGE!!). The default is
- # false, which means the database is only accessible from within the VPC, which is
- # much more secure. This flag MUST be false for serverless mode.
+ # If you wish to make your database accessible from the public Internet, set
+ # this flag to true (WARNING: NOT RECOMMENDED FOR REGULAR USAGE!!). The
+ # default is false, which means the database is only accessible from within
+ # the VPC, which is much more secure. This flag MUST be false for serverless
+ # mode.
publicly_accessible = false
# How many days to keep backup snapshots around before cleaning them up on the
- # read replicas. Must be 1 or greater to support read replicas. 0 means disable
- # automated backups.
+ # read replicas. Must be 1 or greater to support read replicas. 0 means
+ # disable automated backups.
replica_backup_retention_period = 0
# The domain name to create a route 53 record for the read replicas of the RDS
# database.
replica_domain_name = null
- # The maximum number of snapshots to keep around for the purpose of cross account
- # sharing. Once this number is exceeded, a lambda function will delete the oldest
- # snapshots. Only used if var.share_snapshot_with_another_account is true.
+ # The maximum number of snapshots to keep around for the purpose of cross
+ # account sharing. Once this number is exceeded, a lambda function will delete
+ # the oldest snapshots. Only used if var.share_snapshot_with_another_account
+ # is true.
share_snapshot_max_snapshots = 30
# An expression that defines how often to run the lambda function to take
- # snapshots for the purpose of cross account sharing. For example, cron(0 20 * * ?
- # *) or rate(5 minutes). Required if var.share_snapshot_with_another_account is
- # true
+ # snapshots for the purpose of cross account sharing. For example, cron(0 20 *
+ # * ? *) or rate(5 minutes). Required if
+ # var.share_snapshot_with_another_account is true
share_snapshot_schedule_expression = null
- # The ID of the AWS Account that the snapshot should be shared with. Required if
- # var.share_snapshot_with_another_account is true.
+ # The ID of the AWS Account that the snapshot should be shared with. Required
+ # if var.share_snapshot_with_another_account is true.
share_snapshot_with_account_id = null
- # If set to true, take periodic snapshots of the RDS DB that should be shared with
- # another account.
+ # If set to true, take periodic snapshots of the RDS DB that should be shared
+ # with another account.
share_snapshot_with_another_account = false
# Determines whether a final DB snapshot is created before the DB instance is
- # deleted. Be very careful setting this to true; if you do, and you delete this DB
- # instance, you will not have any backups of the data! You almost never want to
- # set this to true, unless you are doing automated or manual testing.
+ # deleted. Be very careful setting this to true; if you do, and you delete
+ # this DB instance, you will not have any backups of the data! You almost
+ # never want to set this to true, unless you are doing automated or manual
+ # testing.
skip_final_snapshot = false
- # If non-null, the RDS Instance will be restored from the given Snapshot ID. This
- # is the Snapshot ID you'd find in the RDS console, e.g:
+ # If non-null, the RDS Instance will be restored from the given Snapshot ID.
+ # This is the Snapshot ID you'd find in the RDS console, e.g:
# rds:production-2015-06-26-06-05.
snapshot_identifier = null
# Specifies whether the DB instance is encrypted.
storage_encrypted = true
- # The type of storage to use for the primary instance. Must be one of 'standard'
- # (magnetic), 'gp2' (general purpose SSD), 'gp3' (general purpose SSD that needs
- # iops independently), or 'io1' (provisioned IOPS SSD).
+ # The type of storage to use for the primary instance. Must be one of
+ # 'standard' (magnetic), 'gp2' (general purpose SSD), 'gp3' (general purpose
+ # SSD that needs iops independently), or 'io1' (provisioned IOPS SSD).
storage_type = "gp2"
- # Trigger an alarm if the number of connections to the DB instance goes above this
- # threshold.
+ # Trigger an alarm if the number of connections to the DB instance goes above
+ # this threshold.
too_many_db_connections_threshold = null
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
too_many_db_connections_treat_missing_data = "missing"
}
@@ -2326,11 +2342,11 @@ The ID of the Security Group that controls access to the RDS DB instance.
diff --git a/docs/reference/services/data-storage/s-3-bucket.md b/docs/reference/services/data-storage/s-3-bucket.md
index 62cda978e8..209a9bd5c0 100644
--- a/docs/reference/services/data-storage/s-3-bucket.md
+++ b/docs/reference/services/data-storage/s-3-bucket.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# S3 Bucket
-View Source
+View Source
Release Notes
@@ -59,7 +59,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -67,7 +67,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -88,40 +88,40 @@ If you want to deploy this repo in production, check out the following resources
module "s_3_bucket" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/s3-bucket?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/s3-bucket?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # What to name the S3 bucket. Note that S3 bucket names must be globally unique
- # across all AWS users!
+ # What to name the S3 bucket. Note that S3 bucket names must be globally
+ # unique across all AWS users!
primary_bucket =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The S3 bucket where access logs for this bucket should be stored. Set to null to
- # disable access logging.
+ # The S3 bucket where access logs for this bucket should be stored. Set to
+ # null to disable access logging.
access_logging_bucket = null
# The lifecycle rules for the access logs bucket. See var.lifecycle_rules for
# details.
access_logging_bucket_lifecycle_rules = {}
- # Configure who will be the default owner of objects uploaded to the access logs
- # S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects),
- # ObjectWriter (the writer of each object owns that object). Note that this
- # setting only takes effect if the object is uploaded with the
+ # Configure who will be the default owner of objects uploaded to the access
+ # logs S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns
+ # objects), ObjectWriter (the writer of each object owns that object). Note
+ # that this setting only takes effect if the object is uploaded with the
# bucket-owner-full-control canned ACL. See
- # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for
- # more info.
+ # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
+ # for more info.
access_logging_bucket_ownership = "ObjectWriter"
- # The IAM policy to apply to the S3 bucket used to store access logs. You can use
- # this to grant read/write access. This should be a map, where each key is a
- # unique statement ID (SID), and each value is an object that contains the
+ # The IAM policy to apply to the S3 bucket used to store access logs. You can
+ # use this to grant read/write access. This should be a map, where each key is
+ # a unique statement ID (SID), and each value is an object that contains the
# parameters defined in the comment above.
access_logging_bucket_policy_statements = {}
@@ -129,32 +129,32 @@ module "s_3_bucket" {
# access_logging_bucket. Only used if access_logging_bucket is specified.
access_logging_prefix = null
- # The canned ACL to apply. See comment above for the list of possible ACLs. If not
- # `null` bucket_ownership cannot be BucketOwnerEnforced
+ # The canned ACL to apply. See comment above for the list of possible ACLs. If
+ # not `null` bucket_ownership cannot be BucketOwnerEnforced
acl = null
# Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS.
bucket_key_enabled = false
- # Optional KMS key to use for encrypting data in the S3 bucket. If null, data in
- # S3 will be encrypted using the default aws/s3 key. If provided, the key policy
- # of the provided key must allow whoever is writing to this bucket to use that
- # key.
+ # Optional KMS key to use for encrypting data in the S3 bucket. If null, data
+ # in S3 will be encrypted using the default aws/s3 key. If provided, the key
+ # policy of the provided key must allow whoever is writing to this bucket to
+ # use that key.
bucket_kms_key_arn = null
- # Configure who will be the default owner of objects uploaded to this S3 bucket:
- # must be one of BucketOwnerPreferred (the bucket owner owns objects),
+ # Configure who will be the default owner of objects uploaded to this S3
+ # bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects),
# ObjectWriter (the writer of each object owns that object). Note that this
# setting only takes effect if the object is uploaded with the
# bucket-owner-full-control canned ACL. See
- # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for
- # more info.
+ # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
+ # for more info.
bucket_ownership = "ObjectWriter"
- # The IAM policy to apply to this S3 bucket. You can use this to grant read/write
- # access. This should be a map, where each key is a unique statement ID (SID), and
- # each value is an object that contains the parameters defined in the comment
- # above.
+ # The IAM policy to apply to this S3 bucket. You can use this to grant
+ # read/write access. This should be a map, where each key is a unique
+ # statement ID (SID), and each value is an object that contains the parameters
+ # defined in the comment above.
bucket_policy_statements = {}
# The server-side encryption algorithm to use on the bucket. Valid values are
@@ -165,8 +165,8 @@ module "s_3_bucket" {
# CORS rules to set on this S3 bucket
cors_rules = []
- # Set to true to enable server-side encryption for this bucket. You can control
- # the algorithm using var.sse_algorithm.
+ # Set to true to enable server-side encryption for this bucket. You can
+ # control the algorithm using var.sse_algorithm.
enable_sse = true
# Set to true to enable versioning for this bucket. If enabled, instead of
@@ -174,101 +174,102 @@ module "s_3_bucket" {
# object, so all the old values are retained.
enable_versioning = true
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # logs bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you want
- # to permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the logs bucket so that the bucket can be destroyed without error. Warning:
+ # these objects are not recoverable so only use this if you're absolutely sure
+ # you want to permanently delete everything!
force_destroy_logs = false
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # primary bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you want
- # to permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the primary bucket so that the bucket can be destroyed without error.
+ # Warning: these objects are not recoverable so only use this if you're
+ # absolutely sure you want to permanently delete everything!
force_destroy_primary = false
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # replica bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you want
- # to permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the replica bucket so that the bucket can be destroyed without error.
+ # Warning: these objects are not recoverable so only use this if you're
+ # absolutely sure you want to permanently delete everything!
force_destroy_replica = false
# The lifecycle rules for this S3 bucket. These can be used to change storage
- # types or delete objects based on customizable rules. This should be a map, where
- # each key is a unique ID for the lifecycle rule, and each value is an object that
- # contains the parameters defined in the comment above.
+ # types or delete objects based on customizable rules. This should be a map,
+ # where each key is a unique ID for the lifecycle rule, and each value is an
+ # object that contains the parameters defined in the comment above.
lifecycle_rules = {}
# Enable MFA delete for either 'Change the versioning state of your bucket' or
# 'Permanently delete an object version'. This cannot be used to toggle this
- # setting but is available to allow managed buckets to reflect the state in AWS.
- # Only used if enable_versioning is true. For instructions on how to enable MFA
- # Delete, check out the README from the terraform-aws-security/private-s3-bucket
- # module.
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. Only used if enable_versioning is true. For instructions on how to
+ # enable MFA Delete, check out the README from the
+ # terraform-aws-security/private-s3-bucket module.
mfa_delete = false
- # The number of days that you want to specify for the default retention period for
- # Object Locking. Only one of object_lock_days or object_lock_years can be
+ # The number of days that you want to specify for the default retention period
+ # for Object Locking. Only one of object_lock_days or object_lock_years can be
# configured. Only used if object_lock_enabled and
# object_lock_default_retention_enabled are true.
object_lock_days = null
- # Set to true to configure a default retention period for object locks when Object
- # Locking is enabled. When disabled, objects will not be protected with locking by
- # default unless explicitly configured at object creation time. Only used if
- # object_lock_enabled is true.
+ # Set to true to configure a default retention period for object locks when
+ # Object Locking is enabled. When disabled, objects will not be protected with
+ # locking by default unless explicitly configured at object creation time.
+ # Only used if object_lock_enabled is true.
object_lock_default_retention_enabled = true
- # Set to true to enable Object Locking. This prevents objects from being deleted
- # for a customizable period of time. Note that this MUST be configured at bucket
- # creation time - you cannot update an existing bucket to enable object locking
- # unless you go through AWS support. Additionally, this is not reversible - once a
- # bucket is created with object lock enabled, you cannot disable object locking
- # even with this setting. Note that enabling object locking will automatically
- # enable bucket versioning.
+ # Set to true to enable Object Locking. This prevents objects from being
+ # deleted for a customizable period of time. Note that this MUST be configured
+ # at bucket creation time - you cannot update an existing bucket to enable
+ # object locking unless you go through AWS support. Additionally, this is not
+ # reversible - once a bucket is created with object lock enabled, you cannot
+ # disable object locking even with this setting. Note that enabling object
+ # locking will automatically enable bucket versioning.
object_lock_enabled = false
- # The default Object Lock retention mode you want to apply to new objects placed
- # in this bucket. Valid values are GOVERNANCE and COMPLIANCE. Only used if
- # object_lock_enabled and object_lock_default_retention_enabled are true.
+ # The default Object Lock retention mode you want to apply to new objects
+ # placed in this bucket. Valid values are GOVERNANCE and COMPLIANCE. Only used
+ # if object_lock_enabled and object_lock_default_retention_enabled are true.
object_lock_mode = null
- # The number of years that you want to specify for the default retention period
- # for Object Locking. Only one of object_lock_days or object_lock_years can be
- # configured. Only used if object_lock_enabled and
+ # The number of years that you want to specify for the default retention
+ # period for Object Locking. Only one of object_lock_days or object_lock_years
+ # can be configured. Only used if object_lock_enabled and
# object_lock_default_retention_enabled are true.
object_lock_years = null
- # The S3 bucket that will be the replica of this bucket. Set to null to disable
- # replication.
+ # The S3 bucket that will be the replica of this bucket. Set to null to
+ # disable replication.
replica_bucket = null
- # The canned ACL to apply. See comment above for the list of possible ACLs. If not
- # `null` bucket_ownership cannot be BucketOwnerEnforced
+ # The canned ACL to apply. See comment above for the list of possible ACLs. If
+ # not `null` bucket_ownership cannot be BucketOwnerEnforced
replica_bucket_acl = null
# If set to true, replica bucket will be expected to already exist.
replica_bucket_already_exists = false
- # Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS for the replica
- # bucket.
+ # Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS for the
+ # replica bucket.
replica_bucket_key_enabled = false
- # The lifecycle rules for the replica bucket. See var.lifecycle_rules for details.
+ # The lifecycle rules for the replica bucket. See var.lifecycle_rules for
+ # details.
replica_bucket_lifecycle_rules = {}
- # Configure who will be the default owner of objects uploaded to the replica S3
- # bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects),
- # ObjectWriter (the writer of each object owns that object). Note that this
- # setting only takes effect if the object is uploaded with the
+ # Configure who will be the default owner of objects uploaded to the replica
+ # S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns
+ # objects), ObjectWriter (the writer of each object owns that object). Note
+ # that this setting only takes effect if the object is uploaded with the
# bucket-owner-full-control canned ACL. See
- # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for
- # more info.
+ # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
+ # for more info.
replica_bucket_ownership = "ObjectWriter"
# The IAM policy to apply to the replica S3 bucket. You can use this to grant
- # read/write access. This should be a map, where each key is a unique statement ID
- # (SID), and each value is an object that contains the parameters defined in the
- # comment above.
+ # read/write access. This should be a map, where each key is a unique
+ # statement ID (SID), and each value is an object that contains the parameters
+ # defined in the comment above.
replica_bucket_policy_statements = {}
# Set to true to enable server-side encryption for the replica bucket. You can
@@ -278,24 +279,24 @@ module "s_3_bucket" {
# The AWS region for the replica bucket.
replica_region = null
- # The server-side encryption algorithm to use on the replica bucket. Valid values
- # are AES256 and aws:kms. To disable server-side encryption, set
+ # The server-side encryption algorithm to use on the replica bucket. Valid
+ # values are AES256 and aws:kms. To disable server-side encryption, set
# var.replica_enable_sse to false.
replica_sse_algorithm = "aws:kms"
- # The ARN of the IAM role for Amazon S3 to assume when replicating objects. Only
- # used if replication_bucket is specified.
+ # The ARN of the IAM role for Amazon S3 to assume when replicating objects.
+ # Only used if replication_bucket is specified.
replication_role = null
# The rules for managing replication. Only used if replication_bucket is
# specified. This should be a map, where the key is a unique ID for each
- # replication rule and the value is an object of the form explained in a comment
- # above.
+ # replication rule and the value is an object of the form explained in a
+ # comment above.
replication_rules = {}
- # A map of tags to apply to the S3 Bucket. These tags will also be applied to the
- # access logging and replica buckets (if any). The key is the tag name and the
- # value is the tag value.
+ # A map of tags to apply to the S3 Bucket. These tags will also be applied to
+ # the access logging and replica buckets (if any). The key is the tag name and
+ # the value is the tag value.
tags = {}
}
@@ -313,7 +314,7 @@ module "s_3_bucket" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/s3-bucket?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/s3-bucket?ref=v0.104.12"
}
inputs = {
@@ -322,34 +323,34 @@ inputs = {
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
- # What to name the S3 bucket. Note that S3 bucket names must be globally unique
- # across all AWS users!
+ # What to name the S3 bucket. Note that S3 bucket names must be globally
+ # unique across all AWS users!
primary_bucket =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The S3 bucket where access logs for this bucket should be stored. Set to null to
- # disable access logging.
+ # The S3 bucket where access logs for this bucket should be stored. Set to
+ # null to disable access logging.
access_logging_bucket = null
# The lifecycle rules for the access logs bucket. See var.lifecycle_rules for
# details.
access_logging_bucket_lifecycle_rules = {}
- # Configure who will be the default owner of objects uploaded to the access logs
- # S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects),
- # ObjectWriter (the writer of each object owns that object). Note that this
- # setting only takes effect if the object is uploaded with the
+ # Configure who will be the default owner of objects uploaded to the access
+ # logs S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns
+ # objects), ObjectWriter (the writer of each object owns that object). Note
+ # that this setting only takes effect if the object is uploaded with the
# bucket-owner-full-control canned ACL. See
- # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for
- # more info.
+ # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
+ # for more info.
access_logging_bucket_ownership = "ObjectWriter"
- # The IAM policy to apply to the S3 bucket used to store access logs. You can use
- # this to grant read/write access. This should be a map, where each key is a
- # unique statement ID (SID), and each value is an object that contains the
+ # The IAM policy to apply to the S3 bucket used to store access logs. You can
+ # use this to grant read/write access. This should be a map, where each key is
+ # a unique statement ID (SID), and each value is an object that contains the
# parameters defined in the comment above.
access_logging_bucket_policy_statements = {}
@@ -357,32 +358,32 @@ inputs = {
# access_logging_bucket. Only used if access_logging_bucket is specified.
access_logging_prefix = null
- # The canned ACL to apply. See comment above for the list of possible ACLs. If not
- # `null` bucket_ownership cannot be BucketOwnerEnforced
+ # The canned ACL to apply. See comment above for the list of possible ACLs. If
+ # not `null` bucket_ownership cannot be BucketOwnerEnforced
acl = null
# Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS.
bucket_key_enabled = false
- # Optional KMS key to use for encrypting data in the S3 bucket. If null, data in
- # S3 will be encrypted using the default aws/s3 key. If provided, the key policy
- # of the provided key must allow whoever is writing to this bucket to use that
- # key.
+ # Optional KMS key to use for encrypting data in the S3 bucket. If null, data
+ # in S3 will be encrypted using the default aws/s3 key. If provided, the key
+ # policy of the provided key must allow whoever is writing to this bucket to
+ # use that key.
bucket_kms_key_arn = null
- # Configure who will be the default owner of objects uploaded to this S3 bucket:
- # must be one of BucketOwnerPreferred (the bucket owner owns objects),
+ # Configure who will be the default owner of objects uploaded to this S3
+ # bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects),
# ObjectWriter (the writer of each object owns that object). Note that this
# setting only takes effect if the object is uploaded with the
# bucket-owner-full-control canned ACL. See
- # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for
- # more info.
+ # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
+ # for more info.
bucket_ownership = "ObjectWriter"
- # The IAM policy to apply to this S3 bucket. You can use this to grant read/write
- # access. This should be a map, where each key is a unique statement ID (SID), and
- # each value is an object that contains the parameters defined in the comment
- # above.
+ # The IAM policy to apply to this S3 bucket. You can use this to grant
+ # read/write access. This should be a map, where each key is a unique
+ # statement ID (SID), and each value is an object that contains the parameters
+ # defined in the comment above.
bucket_policy_statements = {}
# The server-side encryption algorithm to use on the bucket. Valid values are
@@ -393,8 +394,8 @@ inputs = {
# CORS rules to set on this S3 bucket
cors_rules = []
- # Set to true to enable server-side encryption for this bucket. You can control
- # the algorithm using var.sse_algorithm.
+ # Set to true to enable server-side encryption for this bucket. You can
+ # control the algorithm using var.sse_algorithm.
enable_sse = true
# Set to true to enable versioning for this bucket. If enabled, instead of
@@ -402,101 +403,102 @@ inputs = {
# object, so all the old values are retained.
enable_versioning = true
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # logs bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you want
- # to permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the logs bucket so that the bucket can be destroyed without error. Warning:
+ # these objects are not recoverable so only use this if you're absolutely sure
+ # you want to permanently delete everything!
force_destroy_logs = false
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # primary bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you want
- # to permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the primary bucket so that the bucket can be destroyed without error.
+ # Warning: these objects are not recoverable so only use this if you're
+ # absolutely sure you want to permanently delete everything!
force_destroy_primary = false
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # replica bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you want
- # to permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the replica bucket so that the bucket can be destroyed without error.
+ # Warning: these objects are not recoverable so only use this if you're
+ # absolutely sure you want to permanently delete everything!
force_destroy_replica = false
# The lifecycle rules for this S3 bucket. These can be used to change storage
- # types or delete objects based on customizable rules. This should be a map, where
- # each key is a unique ID for the lifecycle rule, and each value is an object that
- # contains the parameters defined in the comment above.
+ # types or delete objects based on customizable rules. This should be a map,
+ # where each key is a unique ID for the lifecycle rule, and each value is an
+ # object that contains the parameters defined in the comment above.
lifecycle_rules = {}
# Enable MFA delete for either 'Change the versioning state of your bucket' or
# 'Permanently delete an object version'. This cannot be used to toggle this
- # setting but is available to allow managed buckets to reflect the state in AWS.
- # Only used if enable_versioning is true. For instructions on how to enable MFA
- # Delete, check out the README from the terraform-aws-security/private-s3-bucket
- # module.
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. Only used if enable_versioning is true. For instructions on how to
+ # enable MFA Delete, check out the README from the
+ # terraform-aws-security/private-s3-bucket module.
mfa_delete = false
- # The number of days that you want to specify for the default retention period for
- # Object Locking. Only one of object_lock_days or object_lock_years can be
+ # The number of days that you want to specify for the default retention period
+ # for Object Locking. Only one of object_lock_days or object_lock_years can be
# configured. Only used if object_lock_enabled and
# object_lock_default_retention_enabled are true.
object_lock_days = null
- # Set to true to configure a default retention period for object locks when Object
- # Locking is enabled. When disabled, objects will not be protected with locking by
- # default unless explicitly configured at object creation time. Only used if
- # object_lock_enabled is true.
+ # Set to true to configure a default retention period for object locks when
+ # Object Locking is enabled. When disabled, objects will not be protected with
+ # locking by default unless explicitly configured at object creation time.
+ # Only used if object_lock_enabled is true.
object_lock_default_retention_enabled = true
- # Set to true to enable Object Locking. This prevents objects from being deleted
- # for a customizable period of time. Note that this MUST be configured at bucket
- # creation time - you cannot update an existing bucket to enable object locking
- # unless you go through AWS support. Additionally, this is not reversible - once a
- # bucket is created with object lock enabled, you cannot disable object locking
- # even with this setting. Note that enabling object locking will automatically
- # enable bucket versioning.
+ # Set to true to enable Object Locking. This prevents objects from being
+ # deleted for a customizable period of time. Note that this MUST be configured
+ # at bucket creation time - you cannot update an existing bucket to enable
+ # object locking unless you go through AWS support. Additionally, this is not
+ # reversible - once a bucket is created with object lock enabled, you cannot
+ # disable object locking even with this setting. Note that enabling object
+ # locking will automatically enable bucket versioning.
object_lock_enabled = false
- # The default Object Lock retention mode you want to apply to new objects placed
- # in this bucket. Valid values are GOVERNANCE and COMPLIANCE. Only used if
- # object_lock_enabled and object_lock_default_retention_enabled are true.
+ # The default Object Lock retention mode you want to apply to new objects
+ # placed in this bucket. Valid values are GOVERNANCE and COMPLIANCE. Only used
+ # if object_lock_enabled and object_lock_default_retention_enabled are true.
object_lock_mode = null
- # The number of years that you want to specify for the default retention period
- # for Object Locking. Only one of object_lock_days or object_lock_years can be
- # configured. Only used if object_lock_enabled and
+ # The number of years that you want to specify for the default retention
+ # period for Object Locking. Only one of object_lock_days or object_lock_years
+ # can be configured. Only used if object_lock_enabled and
# object_lock_default_retention_enabled are true.
object_lock_years = null
- # The S3 bucket that will be the replica of this bucket. Set to null to disable
- # replication.
+ # The S3 bucket that will be the replica of this bucket. Set to null to
+ # disable replication.
replica_bucket = null
- # The canned ACL to apply. See comment above for the list of possible ACLs. If not
- # `null` bucket_ownership cannot be BucketOwnerEnforced
+ # The canned ACL to apply. See comment above for the list of possible ACLs. If
+ # not `null` bucket_ownership cannot be BucketOwnerEnforced
replica_bucket_acl = null
# If set to true, replica bucket will be expected to already exist.
replica_bucket_already_exists = false
- # Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS for the replica
- # bucket.
+ # Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS for the
+ # replica bucket.
replica_bucket_key_enabled = false
- # The lifecycle rules for the replica bucket. See var.lifecycle_rules for details.
+ # The lifecycle rules for the replica bucket. See var.lifecycle_rules for
+ # details.
replica_bucket_lifecycle_rules = {}
- # Configure who will be the default owner of objects uploaded to the replica S3
- # bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects),
- # ObjectWriter (the writer of each object owns that object). Note that this
- # setting only takes effect if the object is uploaded with the
+ # Configure who will be the default owner of objects uploaded to the replica
+ # S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns
+ # objects), ObjectWriter (the writer of each object owns that object). Note
+ # that this setting only takes effect if the object is uploaded with the
# bucket-owner-full-control canned ACL. See
- # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for
- # more info.
+ # https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
+ # for more info.
replica_bucket_ownership = "ObjectWriter"
# The IAM policy to apply to the replica S3 bucket. You can use this to grant
- # read/write access. This should be a map, where each key is a unique statement ID
- # (SID), and each value is an object that contains the parameters defined in the
- # comment above.
+ # read/write access. This should be a map, where each key is a unique
+ # statement ID (SID), and each value is an object that contains the parameters
+ # defined in the comment above.
replica_bucket_policy_statements = {}
# Set to true to enable server-side encryption for the replica bucket. You can
@@ -506,24 +508,24 @@ inputs = {
# The AWS region for the replica bucket.
replica_region = null
- # The server-side encryption algorithm to use on the replica bucket. Valid values
- # are AES256 and aws:kms. To disable server-side encryption, set
+ # The server-side encryption algorithm to use on the replica bucket. Valid
+ # values are AES256 and aws:kms. To disable server-side encryption, set
# var.replica_enable_sse to false.
replica_sse_algorithm = "aws:kms"
- # The ARN of the IAM role for Amazon S3 to assume when replicating objects. Only
- # used if replication_bucket is specified.
+ # The ARN of the IAM role for Amazon S3 to assume when replicating objects.
+ # Only used if replication_bucket is specified.
replication_role = null
# The rules for managing replication. Only used if replication_bucket is
# specified. This should be a map, where the key is a unique ID for each
- # replication rule and the value is an object of the form explained in a comment
- # above.
+ # replication rule and the value is an object of the form explained in a
+ # comment above.
replication_rules = {}
- # A map of tags to apply to the S3 Bucket. These tags will also be applied to the
- # access logging and replica buckets (if any). The key is the tag name and the
- # value is the tag value.
+ # A map of tags to apply to the S3 Bucket. These tags will also be applied to
+ # the access logging and replica buckets (if any). The key is the tag name and
+ # the value is the tag value.
tags = {}
}
@@ -1214,11 +1216,11 @@ The name of the replica S3 bucket.
diff --git a/docs/reference/services/landing-zone/aws-app-account-baseline-wrapper.md b/docs/reference/services/landing-zone/aws-app-account-baseline-wrapper.md
index dced2f27ec..6bdeeaf635 100644
--- a/docs/reference/services/landing-zone/aws-app-account-baseline-wrapper.md
+++ b/docs/reference/services/landing-zone/aws-app-account-baseline-wrapper.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Account Baseline for app accounts
-View Source
+View Source
Release Notes
@@ -57,13 +57,13 @@ If you’ve never used the Service Catalog before, make sure to read
* Learn more about each individual module, click the link in the [Features](#features) section.
* [How to configure a production-grade AWS account structure](https://docs.gruntwork.io/guides/build-it-yourself/landing-zone/)
-* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/landingzone/account-baseline-root/core-concepts.md#how-to-use-multi-region-services)
+* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/landingzone/account-baseline-root/core-concepts.md#how-to-use-multi-region-services)
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -71,7 +71,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing/landingzone): The
+* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing/landingzone): The
`examples/for-learning-and-testing/landingzone` folder contains standalone sample code optimized for learning,
experimenting, and testing (but not direct production usage).
@@ -79,7 +79,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end integrated tech stack on top of the Gruntwork Service Catalog.
@@ -100,7 +100,7 @@ If you want to deploy this repo in production, check out the following resources
module "account_baseline_app" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-app?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-app?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -114,85 +114,87 @@ module "account_baseline_app" {
# GuardDuty.
aws_region =
- # Creates resources in the specified regions. The best practice is to enable AWS
- # Config in all enabled regions in your AWS account. This variable must NOT be set
- # to null or empty. Otherwise, we won't know which regions to use and authenticate
- # to, and may use some not enabled in your AWS account (e.g., GovCloud, China,
- # etc). To get the list of regions enabled in your AWS account, you can use the
- # AWS CLI: aws ec2 describe-regions.
+ # Creates resources in the specified regions. The best practice is to enable
+ # AWS Config in all enabled regions in your AWS account. This variable must
+ # NOT be set to null or empty. Otherwise, we won't know which regions to use
+ # and authenticate to, and may use some not enabled in your AWS account (e.g.,
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions.
config_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable EBS
- # Encryption in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # EBS Encryption in all enabled regions in your AWS account. This variable
+ # must NOT be set to null or empty. Otherwise, we won't know which regions to
+ # use and authenticate to, and may use some not enabled in your AWS account
+ # (e.g., GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
ebs_opt_in_regions =
# Creates resources in the specified regions. The best practice is to enable
- # GuardDuty in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
+ # GuardDuty in all enabled regions in your AWS account. This variable must NOT
+ # be set to null or empty. Otherwise, we won't know which regions to use and
# authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
guardduty_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable IAM
- # Access Analyzer in all enabled regions in your AWS account. This variable must
- # NOT be set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # IAM Access Analyzer in all enabled regions in your AWS account. This
+ # variable must NOT be set to null or empty. Otherwise, we won't know which
+ # regions to use and authenticate to, and may use some not enabled in your AWS
+ # account (e.g., GovCloud, China, etc). To get the list of regions enabled in
+ # your AWS account, you can use the AWS CLI: aws ec2 describe-regions. The
+ # value provided for global_recorder_region must be in this list.
iam_access_analyzer_opt_in_regions =
# Creates resources in the specified regions. This variable must NOT be set to
- # null or empty. Otherwise, we won't know which regions to use and authenticate
- # to, and may use some not enabled in your AWS account (e.g., GovCloud, China,
- # etc). To get the list of regions enabled in your AWS account, you can use the
- # AWS CLI: aws ec2 describe-regions. The value provided for global_recorder_region
- # must be in this list.
+ # null or empty. Otherwise, we won't know which regions to use and
+ # authenticate to, and may use some not enabled in your AWS account (e.g.,
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
kms_cmk_opt_in_regions =
- # The name used to prefix AWS Config and Cloudtrail resources, including the S3
- # bucket names and SNS topics used for each.
+ # The name used to prefix AWS Config and Cloudtrail resources, including the
+ # S3 bucket names and SNS topics used for each.
name_prefix =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Map of additional managed rules to add. The key is the name of the rule (e.g.
- # ´acm-certificate-expiration-check´) and the value is an object specifying the
- # rule details
+ # Map of additional managed rules to add. The key is the name of the rule
+ # (e.g. ´acm-certificate-expiration-check´) and the value is an object
+ # specifying the rule details
additional_config_rules = {}
- # Map of github repositories to the list of branches that are allowed to assume
- # the IAM role. The repository should be encoded as org/repo-name (e.g.,
- # gruntwork-io/terrraform-aws-ci). Allows GitHub Actions to assume the auto deploy
- # IAM role using an OpenID Connect Provider for the given repositories. Refer to
- # the docs for github-actions-iam-role for more information. Note that this is
- # mutually exclusive with var.allow_auto_deploy_from_other_account_arns. Only used
- # if var.enable_github_actions_access is true.
+ # Map of github repositories to the list of branches that are allowed to
+ # assume the IAM role. The repository should be encoded as org/repo-name
+ # (e.g., gruntwork-io/terraform-aws-ci). Allows GitHub Actions to assume the
+ # auto deploy IAM role using an OpenID Connect Provider for the given
+ # repositories. Refer to the docs for github-actions-iam-role for more
+ # information. Note that this is mutually exclusive with
+ # var.allow_auto_deploy_from_other_account_arns. Only used if
+ # var.enable_github_actions_access is true.
allow_auto_deploy_from_github_actions_for_sources = {}
- # A list of IAM ARNs from other AWS accounts that will be allowed to assume the
- # auto deploy IAM role that has the permissions in var.auto_deploy_permissions.
+ # A list of IAM ARNs from other AWS accounts that will be allowed to assume
+ # the auto deploy IAM role that has the permissions in
+ # var.auto_deploy_permissions.
allow_auto_deploy_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_auto_deploy_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the billing info for this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the billing info for this account.
allow_billing_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_billing_access_iam_role_permissions_boundary = null
# If true, an IAM Policy that grants access to CloudTrail will be honored. If
@@ -200,316 +202,323 @@ module "account_baseline_app" {
# CloudTrail and any IAM Policy grants will be ignored. (true or false)
allow_cloudtrail_access_with_iam = true
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the services in this account specified in
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the services in this account specified in
# var.dev_permitted_services.
allow_dev_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_dev_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to this account.
allow_full_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_full_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed read access to
- # the logs in CloudTrail, AWS Config, and CloudWatch for this account. If
+ # A list of IAM ARNs from other AWS accounts that will be allowed read access
+ # to the logs in CloudTrail, AWS Config, and CloudWatch for this account. If
# var.cloudtrail_kms_key_arn is specified, will also be given permissions to
# decrypt with the KMS CMK that is used to encrypt CloudTrail logs.
allow_logs_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed read-only access
- # to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read-only
+ # access to this account.
allow_read_only_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_read_only_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed read access to
- # IAM groups and publish SSH keys. This is used for ssh-grunt.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read access
+ # to IAM groups and publish SSH keys. This is used for ssh-grunt.
allow_ssh_grunt_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed access to AWS
- # support for this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed access to
+ # AWS support for this account.
allow_support_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_support_access_iam_role_permissions_boundary = null
- # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group for
- # doing automated deployments. NOTE: If var.should_create_iam_group_auto_deploy is
- # true, the list must have at least one element (e.g. '*').
+ # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group
+ # for doing automated deployments. NOTE: If
+ # var.should_create_iam_group_auto_deploy is true, the list must have at least
+ # one element (e.g. '*').
auto_deploy_permissions = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
aws_config_iam_role_permissions_boundary = null
# Whether or not to allow kms:DescribeKey to external AWS accounts with write
- # access to the CloudTrail bucket. This is useful during deployment so that you
- # don't have to pass around the KMS key ARN.
+ # access to the CloudTrail bucket. This is useful during deployment so that
+ # you don't have to pass around the KMS key ARN.
cloudtrail_allow_kms_describe_key_to_external_aws_accounts = false
- # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs to.
- # This log group exists in the current account. Set this value to `null` to avoid
- # publishing the trail logs to the logs group. The recommended configuration for
- # CloudTrail is (a) for each child account to aggregate its logs in an S3 bucket
- # in a single central account, such as a logs account and (b) to also store 14
- # days work of logs in CloudWatch in the child account itself for local debugging.
+ # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs
+ # to. This log group exists in the current account. Set this value to `null`
+ # to avoid publishing the trail logs to the logs group. The recommended
+ # configuration for CloudTrail is (a) for each child account to aggregate its
+ # logs in an S3 bucket in a single central account, such as a logs account and
+ # (b) to also store 14 days' worth of logs in CloudWatch in the child account
+ # itself for local debugging.
cloudtrail_cloudwatch_logs_group_name = "cloudtrail-logs"
# If true, logging of data events will be enabled.
cloudtrail_data_logging_enabled = false
- # Specify if you want your event selector to include management events for your
- # trail.
+ # Specify if you want your event selector to include management events for
+ # your trail.
cloudtrail_data_logging_include_management_events = true
- # Specify if you want your trail to log read-only events, write-only events, or
- # all. Possible values are: ReadOnly, WriteOnly, All.
+ # Specify if you want your trail to log read-only events, write-only events,
+ # or all. Possible values are: ReadOnly, WriteOnly, All.
cloudtrail_data_logging_read_write_type = "All"
- # Data resources for which to log data events. This should be a map, where each
- # key is a data resource type, and each value is a list of data resource values.
- # Possible values for data resource types are: AWS::S3::Object,
- # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource' block
- # within the 'event_selector' block of the 'aws_cloudtrail' resource for context:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # trail#data_resource.
+ # Data resources for which to log data events. This should be a map, where
+ # each key is a data resource type, and each value is a list of data resource
+ # values. Possible values for data resource types are: AWS::S3::Object,
+ # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource'
+ # block within the 'event_selector' block of the 'aws_cloudtrail' resource for
+ # context:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#data_resource.
cloudtrail_data_logging_resources = {}
- # Provide a list of AWS account IDs that will be allowed to send CloudTrail logs
- # to this account. This is only required if you are aggregating CloudTrail logs in
- # this account (e.g., this is the logs account) from other accounts.
+ # Provide a list of AWS account IDs that will be allowed to send CloudTrail
+ # logs to this account. This is only required if you are aggregating
+ # CloudTrail logs in this account (e.g., this is the logs account) from other
+ # accounts.
cloudtrail_external_aws_account_ids_with_write_access = []
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
cloudtrail_force_destroy = false
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
cloudtrail_iam_role_permissions_boundary = null
- # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key) that
- # governs access to write API calls older than 7 days and all read API calls. If
- # you are aggregating CloudTrail logs and creating the CMK in this account (e.g.,
- # if this is the logs account), you MUST specify at least one IAM user (or other
- # IAM ARN) that will be given administrator permissions for CMK, including the
- # ability to change who can access this CMK and the extended log data it protects.
- # If you are aggregating CloudTrail logs in another AWS account and the CMK
- # already exists (e.g., if this is the stage or prod account), set this parameter
- # to an empty list.
+ # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. If you are aggregating CloudTrail logs and creating the CMK in this
+ # account (e.g., if this is the logs account), you MUST specify at least one
+ # IAM user (or other IAM ARN) that will be given administrator permissions for
+ # CMK, including the ability to change who can access this CMK and the
+ # extended log data it protects. If you are aggregating CloudTrail logs in
+ # another AWS account and the CMK already exists (e.g., if this is the stage
+ # or prod account), set this parameter to an empty list.
cloudtrail_kms_key_administrator_iam_arns = []
- # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key) that
- # governs access to write API calls older than 7 days and all read API calls. If
- # that CMK already exists (e.g., if this is the stage or prod account and you want
- # to use a CMK that already exists in the logs account), set this to the ARN of
- # that CMK. Otherwise (e.g., if this is the logs account), set this to null, and a
- # new CMK will be created.
+ # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. If that CMK already exists (e.g., if this is the stage or prod
+ # account and you want to use a CMK that already exists in the logs account),
+ # set this to the ARN of that CMK. Otherwise (e.g., if this is the logs
+ # account), set this to null, and a new CMK will be created.
cloudtrail_kms_key_arn = null
- # If the kms_key_arn provided is an alias or alias ARN, then this must be set to
- # true so that the module will exchange the alias for a CMK ARN. Setting this to
- # true and using aliases requires
- # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be true
- # for multi-account scenarios.
+ # If the kms_key_arn provided is an alias or alias ARN, then this must be set
+ # to true so that the module will exchange the alias for a CMK ARN. Setting
+ # this to true and using aliases requires
+ # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be
+ # true for multi-account scenarios.
cloudtrail_kms_key_arn_is_alias = false
- # Additional service principals beyond CloudTrail that should have access to the
- # KMS key used to encrypt the logs. This is useful for granting access to the logs
- # for the purposes of constructing metric filters.
+ # Additional service principals beyond CloudTrail that should have access to
+ # the KMS key used to encrypt the logs. This is useful for granting access to
+ # the logs for the purposes of constructing metric filters.
cloudtrail_kms_key_service_principals = []
- # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key) that
- # governs access to write API calls older than 7 days and all read API calls. If
- # you are aggregating CloudTrail logs and creating the CMK in this account (e.g.,
- # this is the logs account), you MUST specify at least one IAM user (or other IAM
- # ARN) that will be given user access to this CMK, which will allow this user to
- # read CloudTrail Logs. If you are aggregating CloudTrail logs in another AWS
- # account and the CMK already exists, set this parameter to an empty list (e.g.,
- # if this is the stage or prod account).
+ # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. If you are aggregating CloudTrail logs and creating the CMK in this
+ # account (e.g., this is the logs account), you MUST specify at least one IAM
+ # user (or other IAM ARN) that will be given user access to this CMK, which
+ # will allow this user to read CloudTrail Logs. If you are aggregating
+ # CloudTrail logs in another AWS account and the CMK already exists, set this
+ # parameter to an empty list (e.g., if this is the stage or prod account).
cloudtrail_kms_key_user_iam_arns = []
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
cloudtrail_num_days_after_which_archive_log_data = 30
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
cloudtrail_num_days_after_which_delete_log_data = 365
- # After this number of days, logs stored in CloudWatch will be deleted. Possible
- # values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827,
- # 3653, and 0 (default). When set to 0, logs will be retained indefinitely.
+ # After this number of days, logs stored in CloudWatch will be deleted.
+ # Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400,
+ # 545, 731, 1827, 3653, and 0 (default). When set to 0, logs will be retained
+ # indefinitely.
cloudtrail_num_days_to_retain_cloudwatch_logs = 0
# Set to false to create an S3 bucket of name var.cloudtrail_s3_bucket_name in
- # this account for storing CloudTrail logs (e.g., if this is the logs account).
- # Set to true to assume the bucket specified in var.cloudtrail_s3_bucket_name
- # already exists in another AWS account (e.g., if this is the stage or prod
- # account and var.cloudtrail_s3_bucket_name is the name of a bucket in the logs
- # account).
+ # this account for storing CloudTrail logs (e.g., if this is the logs
+ # account). Set to true to assume the bucket specified in
+ # var.cloudtrail_s3_bucket_name already exists in another AWS account (e.g.,
+ # if this is the stage or prod account and var.cloudtrail_s3_bucket_name is
+ # the name of a bucket in the logs account).
cloudtrail_s3_bucket_already_exists = true
# Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS.
cloudtrail_s3_bucket_key_enabled = false
- # The name of the S3 Bucket where CloudTrail logs will be stored. This could be a
- # bucket in this AWS account (e.g., if this is the logs account) or the name of a
- # bucket in another AWS account where logs should be sent (e.g., if this is the
- # stage or prod account and you're specifying the name of a bucket in the logs
- # account).
+ # The name of the S3 Bucket where CloudTrail logs will be stored. This could
+ # be a bucket in this AWS account (e.g., if this is the logs account) or the
+ # name of a bucket in another AWS account where logs should be sent (e.g., if
+ # this is the stage or prod account and you're specifying the name of a bucket
+ # in the logs account).
cloudtrail_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage Cloudtrail data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store CloudTrail data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README from
+ # the terraform-aws-security/private-s3-bucket module.
cloudtrail_s3_mfa_delete = false
# Tags to apply to the CloudTrail resources.
cloudtrail_tags = {}
# Set to true to send the AWS Config data to another account (e.g., a logs
- # account) for aggregation purposes. You must set the ID of that other account via
- # the config_central_account_id variable. This redundant variable has to exist
- # because Terraform does not allow computed data in count and for_each parameters
- # and var.config_central_account_id may be computed if its the ID of a
- # newly-created AWS account.
+ # account) for aggregation purposes. You must set the ID of that other account
+ # via the config_central_account_id variable. This redundant variable has to
+ # exist because Terraform does not allow computed data in count and for_each
+ # parameters and var.config_central_account_id may be computed if it's the ID
+ # of a newly-created AWS account.
config_aggregate_config_data_in_external_account = false
# If the S3 bucket and SNS topics used for AWS Config live in a different AWS
- # account, set this variable to the ID of that account (e.g., if this is the stage
- # or prod account, set this to the ID of the logs account). If the S3 bucket and
- # SNS topics live in this account (e.g., this is the logs account), set this
- # variable to null. Only used if
+ # account, set this variable to the ID of that account (e.g., if this is the
+ # stage or prod account, set this to the ID of the logs account). If the S3
+ # bucket and SNS topics live in this account (e.g., this is the logs account),
+ # set this variable to null. Only used if
# var.config_aggregate_config_data_in_external_account is true.
config_central_account_id = null
- # Set to true to create AWS Config rules directly in this account. Set false to
- # not create any Config rules in this account (i.e., if you created the rules at
- # the organization level already). We recommend setting this to true to use
- # account-level rules because org-level rules create a chicken-and-egg problem
- # with creating new accounts.
+ # Set to true to create AWS Config rules directly in this account. Set false
+ # to not create any Config rules in this account (i.e., if you created the
+ # rules at the organization level already). We recommend setting this to true
+ # to use account-level rules because org-level rules create a chicken-and-egg
+ # problem with creating new accounts.
config_create_account_rules = true
# Optional KMS key to use for encrypting S3 objects on the AWS Config delivery
- # channel for an externally managed S3 bucket. This must belong to the same region
- # as the destination S3 bucket. If null, AWS Config will default to encrypting the
- # delivered data with AES-256 encryption. Only used if var.should_create_s3_bucket
- # is false - otherwise, var.config_s3_bucket_kms_key_arn is used.
+ # channel for an externally managed S3 bucket. This must belong to the same
+ # region as the destination S3 bucket. If null, AWS Config will default to
+ # encrypting the delivered data with AES-256 encryption. Only used if
+ # var.should_create_s3_bucket is false - otherwise,
+ # var.config_s3_bucket_kms_key_arn is used.
config_delivery_channel_kms_key_arn = null
- # Same as var.config_delivery_channel_kms_key_arn, except the value is a name of a
- # KMS key configured with var.kms_customer_master_keys. The module created KMS key
- # for the delivery region (indexed by the name) will be used. Note that if both
- # var.config_delivery_channel_kms_key_arn and
+ # Same as var.config_delivery_channel_kms_key_arn, except the value is a name
+ # of a KMS key configured with var.kms_customer_master_keys. The module
+ # created KMS key for the delivery region (indexed by the name) will be used.
+ # Note that if both var.config_delivery_channel_kms_key_arn and
# var.config_delivery_channel_kms_key_by_name are configured, the key in
# var.config_delivery_channel_kms_key_arn will always be used.
config_delivery_channel_kms_key_by_name = null
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
config_force_destroy = false
- # Provide a list of AWS account IDs that will be allowed to send AWS Config data
- # to this account. This is only required if you are aggregating config data in
- # this account (e.g., this is the logs account) from other accounts.
+ # Provide a list of AWS account IDs that will be allowed to send AWS Config
+ # data to this account. This is only required if you are aggregating config
+ # data in this account (e.g., this is the logs account) from other accounts.
config_linked_accounts = []
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
config_num_days_after_which_archive_log_data = 365
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Optional KMS key to use for encrypting S3 objects on the AWS Config bucket, when
- # the S3 bucket is created within this module (var.config_should_create_s3_bucket
- # is true). For encrypting S3 objects on delivery for an externally managed S3
- # bucket, refer to the var.config_delivery_channel_kms_key_arn input variable. If
- # null, data in S3 will be encrypted using the default aws/s3 key. If provided,
- # the key policy of the provided key must permit the IAM role used by AWS Config.
- # See https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that
+ # Optional KMS key to use for encrypting S3 objects on the AWS Config bucket,
+ # when the S3 bucket is created within this module
+ # (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
+ # delivery for an externally managed S3 bucket, refer to the
+ # var.config_delivery_channel_kms_key_arn input variable. If null, data in S3
+ # will be encrypted using the default aws/s3 key. If provided, the key policy
+ # of the provided key must permit the IAM role used by AWS Config. See
+ # https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that
# the KMS key must reside in the global recorder region (as configured by
# var.aws_region).
config_s3_bucket_kms_key_arn = null
- # Same as var.config_s3_bucket_kms_key_arn, except the value is a name of a KMS
- # key configured with var.kms_customer_master_keys. The module created KMS key for
- # the global recorder region (indexed by the name) will be used. Note that if both
- # var.config_s3_bucket_kms_key_arn and var.config_s3_bucket_kms_key_by_name are
- # configured, the key in var.config_s3_bucket_kms_key_arn will always be used.
+ # Same as var.config_s3_bucket_kms_key_arn, except the value is a name of a
+ # KMS key configured with var.kms_customer_master_keys. The module created KMS
+ # key for the global recorder region (indexed by the name) will be used. Note
+ # that if both var.config_s3_bucket_kms_key_arn and
+ # var.config_s3_bucket_kms_key_by_name are configured, the key in
+ # var.config_s3_bucket_kms_key_arn will always be used.
config_s3_bucket_kms_key_by_name = null
- # The name of the S3 Bucket where Config items will be stored. Can be in the same
- # account or in another account.
+ # The name of the S3 Bucket where Config items will be stored. Can be in the
+ # same account or in another account.
config_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage AWS Config data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store AWS Config data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README from
+ # the terraform-aws-security/private-s3-bucket module.
config_s3_mfa_delete = false
# Set to true to create an S3 bucket of name var.config_s3_bucket_name in this
- # account for storing AWS Config data (e.g., if this is the logs account). Set to
- # false to assume the bucket specified in var.config_s3_bucket_name already exists
- # in another AWS account (e.g., if this is the stage or prod account and
- # var.config_s3_bucket_name is the name of a bucket in the logs account).
+ # account for storing AWS Config data (e.g., if this is the logs account). Set
+ # to false to assume the bucket specified in var.config_s3_bucket_name already
+ # exists in another AWS account (e.g., if this is the stage or prod account
+ # and var.config_s3_bucket_name is the name of a bucket in the logs account).
config_should_create_s3_bucket = false
# set to true to create an sns topic in this account for sending aws config
- # notifications (e.g., if this is the logs account). set to false to assume the
- # topic specified in var.config_sns_topic_name already exists in another aws
- # account (e.g., if this is the stage or prod account and
+ # notifications (e.g., if this is the logs account). set to false to assume
+ # the topic specified in var.config_sns_topic_name already exists in another
+ # aws account (e.g., if this is the stage or prod account and
# var.config_sns_topic_name is the name of an sns topic in the logs account).
config_should_create_sns_topic = false
- # Same as var.config_sns_topic_kms_key_region_map, except the value is a name of a
- # KMS key configured with var.kms_customer_master_keys. The module created KMS key
- # for each region (indexed by the name) will be used. Note that if an entry exists
- # for a region in both var.config_sns_topic_kms_key_region_map and
+ # Same as var.config_sns_topic_kms_key_region_map, except the value is a name
+ # of a KMS key configured with var.kms_customer_master_keys. The module
+ # created KMS key for each region (indexed by the name) will be used. Note
+ # that if an entry exists for a region in both
+ # var.config_sns_topic_kms_key_region_map and
# var.config_sns_topic_kms_key_by_name_region_map, then the key in
# var.config_sns_topic_kms_key_region_map will always be used.
config_sns_topic_kms_key_by_name_region_map = null
- # Optional KMS key to use for each region for configuring default encryption for
- # the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of KMS
- # key). If null or the region key is missing, encryption will not be configured
- # for the SNS topic in that region.
+ # Optional KMS key to use for each region for configuring default encryption
+ # for the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of
+ # KMS key). If null or the region key is missing, encryption will not be
+ # configured for the SNS topic in that region.
config_sns_topic_kms_key_region_map = null
- # the name of the sns topic in where aws config notifications will be sent. can be
- # in the same account or in another account.
+ # the name of the sns topic where aws config notifications will be sent.
+ # can be in the same account or in another account.
config_sns_topic_name = "ConfigTopic"
- # A map of tags to apply to the S3 Bucket. The key is the tag name and the value
- # is the tag value.
+ # A map of tags to apply to the S3 Bucket. The key is the tag name and the
+ # value is the tag value.
config_tags = {}
- # The maximum frequency with which AWS Config runs evaluations for the ´PERIODIC´
- # rules. See
- # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.h
- # ml#maximum_execution_frequency
+ # The maximum frequency with which AWS Config runs evaluations for the
+ # `PERIODIC` rules. See
+ # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.html#maximum_execution_frequency
configrules_maximum_execution_frequency = "TwentyFour_Hours"
# A custom name to use for the Cloudtrail Trail. If null, defaults to the
@@ -517,36 +526,36 @@ module "account_baseline_app" {
custom_cloudtrail_trail_name = null
# A list of AWS services for which the developers from the accounts in
- # var.allow_dev_access_from_other_account_arns will receive full permissions. See
- # https://goo.gl/ZyoHlz to find the IAM Service name. For example, to grant
- # developers access only to EC2 and Amazon Machine Learning, use the value
- # ["ec2","machinelearning"]. Do NOT add iam to the list of services, or that will
- # grant Developers de facto admin access.
+ # var.allow_dev_access_from_other_account_arns will receive full permissions.
+ # See https://goo.gl/ZyoHlz to find the IAM Service name. For example, to
+ # grant developers access only to EC2 and Amazon Machine Learning, use the
+ # value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
+ # that will grant Developers de facto admin access.
dev_permitted_services = []
- # If set to true (default), all new EBS volumes will have encryption enabled by
- # default
+ # If set to true (default), all new EBS volumes will have encryption enabled
+ # by default
ebs_enable_encryption = true
# The name of the KMS CMK to use by default for encrypting EBS volumes, if
- # var.enable_encryption and var.use_existing_kms_keys are enabled. The name must
- # match the name given the var.kms_customer_master_keys variable.
+ # var.enable_encryption and var.use_existing_kms_keys are enabled. The name
+ # must match the name given the var.kms_customer_master_keys variable.
ebs_kms_key_name = ""
# If set to true, the KMS Customer Managed Keys (CMK) with the name in
- # var.ebs_kms_key_name will be set as the default for EBS encryption. When false
- # (default), the AWS-managed aws/ebs key will be used.
+ # var.ebs_kms_key_name will be set as the default for EBS encryption. When
+ # false (default), the AWS-managed aws/ebs key will be used.
ebs_use_existing_kms_keys = false
- # Set to true (default) to enable CloudTrail in this app account. Set to false to
- # disable CloudTrail (note: all other CloudTrail variables will be ignored). Note
- # that if you have enabled organization trail in the root (parent) account, you
- # should set this to false; the organization trail will enable CloudTrail on child
- # accounts by default.
+ # Set to true (default) to enable CloudTrail in this app account. Set to false
+ # to disable CloudTrail (note: all other CloudTrail variables will be
+ # ignored). Note that if you have enabled organization trail in the root
+ # (parent) account, you should set this to false; the organization trail will
+ # enable CloudTrail on child accounts by default.
enable_cloudtrail = true
- # Set to true to enable AWS Config in this app account. Set to false to disable
- # AWS Config (note: all other AWS config variables will be ignored).
+ # Set to true to enable AWS Config in this app account. Set to false to
+ # disable AWS Config (note: all other AWS config variables will be ignored).
enable_config = true
# Checks whether the EBS volumes that are in an attached state are encrypted.
@@ -554,15 +563,15 @@ module "account_baseline_app" {
# When true, create an Open ID Connect Provider that GitHub actions can use to
# assume IAM roles in the account. Refer to
- # https://docs.github.com/en/actions/deployment/security-hardening-your-deployment
- # /configuring-openid-connect-in-amazon-web-services for more information.
+ # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services
+ # for more information.
enable_github_actions_access = false
- # Set to true (default) to enable GuardDuty in this app account. Set to false to
- # disable GuardDuty (note: all other GuardDuty variables will be ignored). Note
- # that if you have enabled organization level GuardDuty in the root (parent)
- # account, you should set this to false; the organization GuardDuty will enable
- # GuardDuty on child accounts by default.
+ # Set to true (default) to enable GuardDuty in this app account. Set to false
+ # to disable GuardDuty (note: all other GuardDuty variables will be ignored).
+ # Note that if you have enabled organization level GuardDuty in the root
+ # (parent) account, you should set this to false; the organization GuardDuty
+ # will enable GuardDuty on child accounts by default.
enable_guardduty = true
# A feature flag to enable or disable this module.
@@ -580,15 +589,15 @@ module "account_baseline_app" {
# Password Policy variables will be ignored).
enable_iam_user_password_policy = true
- # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual Private
- # Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
+ # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual
+ # Private Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
enable_insecure_sg_rules = true
# Checks whether storage encryption is enabled for your RDS DB instances.
enable_rds_storage_encrypted = true
- # Checks whether users of your AWS account require a multi-factor authentication
- # (MFA) device to sign in with root credentials.
+ # Checks whether users of your AWS account require a multi-factor
+ # authentication (MFA) device to sign in with root credentials.
enable_root_account_mfa = true
# Checks that your Amazon S3 buckets do not allow public read access.
@@ -601,11 +610,11 @@ module "account_baseline_app" {
# configuring the encrypted volumes config rule.
encrypted_volumes_kms_id = null
- # When set, use the statically provided hardcoded list of thumbprints rather than
- # looking it up dynamically. This is useful if you want to trade reliability of
- # the OpenID Connect Provider across certificate renewals with a static list that
- # is obtained using a trustworthy mechanism, to mitigate potential damage from a
- # domain hijacking attack on GitHub domains.
+ # When set, use the statically provided hardcoded list of thumbprints rather
+ # than looking it up dynamically. This is useful if you want to trade
+ # reliability of the OpenID Connect Provider across certificate renewals with
+ # a static list that is obtained using a trustworthy mechanism, to mitigate
+ # potential damage from a domain hijacking attack on GitHub domains.
github_actions_openid_connect_provider_thumbprint_list = null
# Name of the Cloudwatch event rules.
@@ -614,9 +623,9 @@ module "account_baseline_app" {
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
- # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must be
- # configured in Terraform to enable drift detection. Valid values for standalone
- # and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
+ # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must
+ # be configured in Terraform to enable drift detection. Valid values for
+ # standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
# Specifies a name for the created SNS topics where findings are published.
@@ -629,9 +638,9 @@ module "account_baseline_app" {
# The name of the IAM Access Analyzer module
iam_access_analyzer_name = "baseline_app-iam_access_analyzer"
- # If set to ORGANIZATION, the analyzer will be scanning the current organization
- # and any policies that refer to linked resources such as S3, IAM, Lambda and SQS
- # policies.
+ # If set to ORGANIZATION, the analyzer will be scanning the current
+ # organization and any policies that refer to linked resources such as S3,
+ # IAM, Lambda and SQS policies.
iam_access_analyzer_type = "ORGANIZATION"
# Allow users to change their own password.
@@ -664,50 +673,52 @@ module "account_baseline_app" {
# The tags to apply to all the IAM role resources.
iam_role_tags = {}
- # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '443,1020-1025'.
+ # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '443,1020-1025'.
insecure_sg_rules_authorized_tcp_ports = "443"
- # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '500,1020-1025'.
+ # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '500,1020-1025'.
insecure_sg_rules_authorized_udp_ports = null
- # A map of tags to apply to all KMS Keys to be created. In this map variable, the
- # key is the tag name and the value is the tag value.
+ # A map of tags to apply to all KMS Keys to be created. In this map variable,
+ # the key is the tag name and the value is the tag value.
kms_cmk_global_tags = {}
# You can use this variable to create account-level KMS Customer Master Keys
- # (CMKs) for encrypting and decrypting data. This variable should be a map where
- # the keys are the names of the CMK and the values are an object that defines the
- # configuration for that CMK. See the comment below for the configuration options
- # you can set for each key.
+ # (CMKs) for encrypting and decrypting data. This variable should be a map
+ # where the keys are the names of the CMK and the values are an object that
+ # defines the configuration for that CMK. See the comment below for the
+ # configuration options you can set for each key.
kms_customer_master_keys = {}
# The map of names of KMS grants to the region where the key resides in. There
- # should be a one to one mapping between entries in this map and the entries of
- # the kms_grants map. This is used to workaround a terraform limitation where the
- # for_each value can not depend on resources.
+ # should be a one to one mapping between entries in this map and the entries
+ # of the kms_grants map. This is used to work around a terraform limitation
+ # where the for_each value can not depend on resources.
kms_grant_regions = {}
# Create the specified KMS grants to allow entities to use the KMS key without
- # modifying the KMS policy or IAM. This is necessary to allow AWS services (e.g.
- # ASG) to use CMKs encrypt and decrypt resources. The input is a map of grant name
- # to grant properties. The name must be unique per account.
+ # modifying the KMS policy or IAM. This is necessary to allow AWS services
+ # (e.g. ASG) to use CMKs to encrypt and decrypt resources. The input is a map of
+ # grant name to grant properties. The name must be unique per account.
kms_grants = {}
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to all
- # IAM roles created by this module that are intended for people to use, such as
- # allow-read-only-access-from-other-accounts. For IAM roles that are intended for
- # machine users, such as allow-auto-deploy-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for people
+ # to use, such as allow-read-only-access-from-other-accounts. For IAM roles
+ # that are intended for machine users, such as
+ # allow-auto-deploy-from-other-accounts, see
# var.max_session_duration_machine_users.
max_session_duration_human_users = 43200
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to
- # all IAM roles created by this module that are intended for machine users, such
- # as allow-auto-deploy-from-other-accounts. For IAM roles that are intended for
- # human users, such as allow-read-only-access-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for
+ # machine users, such as allow-auto-deploy-from-other-accounts. For IAM roles
+ # that are intended for human users, such as
+ # allow-read-only-access-from-other-accounts, see
# var.max_session_duration_human_users.
max_session_duration_machine_users = 3600
@@ -716,26 +727,26 @@ module "account_baseline_app" {
rds_storage_encrypted_kms_id = null
# Create service-linked roles for this set of services. You should pass in the
- # URLs of the services, but without the protocol (e.g., http://) in front: e.g.,
- # use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or es.amazonaws.com for
- # Amazon Elasticsearch. Service-linked roles are predefined by the service, can
- # typically only be assumed by that service, and include all the permissions that
- # the service requires to call other AWS services on your behalf. You can
- # typically only create one such role per AWS account, which is why this parameter
- # exists in the account baseline. See
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-wor
- # -with-iam.html for the list of services that support service-linked roles.
+ # URLs of the services, but without the protocol (e.g., http://) in front:
+ # e.g., use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or
+ # es.amazonaws.com for Amazon Elasticsearch. Service-linked roles are
+ # predefined by the service, can typically only be assumed by that service,
+ # and include all the permissions that the service requires to call other AWS
+ # services on your behalf. You can typically only create one such role per AWS
+ # account, which is why this parameter exists in the account baseline. See
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html
+ # for the list of services that support service-linked roles.
service_linked_roles = []
- # Should we require that all IAM Users use Multi-Factor Authentication for both
- # AWS API calls and the AWS Web Console? (true or false)
+ # Should we require that all IAM Users use Multi-Factor Authentication for
+ # both AWS API calls and the AWS Web Console? (true or false)
should_require_mfa = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
}
@@ -753,7 +764,7 @@ module "account_baseline_app" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-app?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-app?ref=v0.104.12"
}
inputs = {
@@ -770,85 +781,87 @@ inputs = {
# GuardDuty.
aws_region =
- # Creates resources in the specified regions. The best practice is to enable AWS
- # Config in all enabled regions in your AWS account. This variable must NOT be set
- # to null or empty. Otherwise, we won't know which regions to use and authenticate
- # to, and may use some not enabled in your AWS account (e.g., GovCloud, China,
- # etc). To get the list of regions enabled in your AWS account, you can use the
- # AWS CLI: aws ec2 describe-regions.
+ # Creates resources in the specified regions. The best practice is to enable
+ # AWS Config in all enabled regions in your AWS account. This variable must
+ # NOT be set to null or empty. Otherwise, we won't know which regions to use
+ # and authenticate to, and may use some not enabled in your AWS account (e.g.,
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions.
config_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable EBS
- # Encryption in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # EBS Encryption in all enabled regions in your AWS account. This variable
+ # must NOT be set to null or empty. Otherwise, we won't know which regions to
+ # use and authenticate to, and may use some not enabled in your AWS account
+ # (e.g., GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
ebs_opt_in_regions =
# Creates resources in the specified regions. The best practice is to enable
- # GuardDuty in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
+ # GuardDuty in all enabled regions in your AWS account. This variable must NOT
+ # be set to null or empty. Otherwise, we won't know which regions to use and
# authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
guardduty_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable IAM
- # Access Analyzer in all enabled regions in your AWS account. This variable must
- # NOT be set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # IAM Access Analyzer in all enabled regions in your AWS account. This
+ # variable must NOT be set to null or empty. Otherwise, we won't know which
+ # regions to use and authenticate to, and may use some not enabled in your AWS
+ # account (e.g., GovCloud, China, etc). To get the list of regions enabled in
+ # your AWS account, you can use the AWS CLI: aws ec2 describe-regions. The
+ # value provided for global_recorder_region must be in this list.
iam_access_analyzer_opt_in_regions =
# Creates resources in the specified regions. This variable must NOT be set to
- # null or empty. Otherwise, we won't know which regions to use and authenticate
- # to, and may use some not enabled in your AWS account (e.g., GovCloud, China,
- # etc). To get the list of regions enabled in your AWS account, you can use the
- # AWS CLI: aws ec2 describe-regions. The value provided for global_recorder_region
- # must be in this list.
+ # null or empty. Otherwise, we won't know which regions to use and
+ # authenticate to, and may use some not enabled in your AWS account (e.g.,
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
kms_cmk_opt_in_regions =
- # The name used to prefix AWS Config and Cloudtrail resources, including the S3
- # bucket names and SNS topics used for each.
+ # The name used to prefix AWS Config and Cloudtrail resources, including the
+ # S3 bucket names and SNS topics used for each.
name_prefix =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Map of additional managed rules to add. The key is the name of the rule (e.g.
- # ´acm-certificate-expiration-check´) and the value is an object specifying the
- # rule details
+ # Map of additional managed rules to add. The key is the name of the rule
+ # (e.g. ´acm-certificate-expiration-check´) and the value is an object
+ # specifying the rule details
additional_config_rules = {}
- # Map of github repositories to the list of branches that are allowed to assume
- # the IAM role. The repository should be encoded as org/repo-name (e.g.,
- # gruntwork-io/terrraform-aws-ci). Allows GitHub Actions to assume the auto deploy
- # IAM role using an OpenID Connect Provider for the given repositories. Refer to
- # the docs for github-actions-iam-role for more information. Note that this is
- # mutually exclusive with var.allow_auto_deploy_from_other_account_arns. Only used
- # if var.enable_github_actions_access is true.
+ # Map of github repositories to the list of branches that are allowed to
+ # assume the IAM role. The repository should be encoded as org/repo-name
+ # (e.g., gruntwork-io/terraform-aws-ci). Allows GitHub Actions to assume the
+ # auto deploy IAM role using an OpenID Connect Provider for the given
+ # repositories. Refer to the docs for github-actions-iam-role for more
+ # information. Note that this is mutually exclusive with
+ # var.allow_auto_deploy_from_other_account_arns. Only used if
+ # var.enable_github_actions_access is true.
allow_auto_deploy_from_github_actions_for_sources = {}
- # A list of IAM ARNs from other AWS accounts that will be allowed to assume the
- # auto deploy IAM role that has the permissions in var.auto_deploy_permissions.
+ # A list of IAM ARNs from other AWS accounts that will be allowed to assume
+ # the auto deploy IAM role that has the permissions in
+ # var.auto_deploy_permissions.
allow_auto_deploy_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_auto_deploy_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the billing info for this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the billing info for this account.
allow_billing_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_billing_access_iam_role_permissions_boundary = null
# If true, an IAM Policy that grants access to CloudTrail will be honored. If
@@ -856,316 +869,323 @@ inputs = {
# CloudTrail and any IAM Policy grants will be ignored. (true or false)
allow_cloudtrail_access_with_iam = true
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the services in this account specified in
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the services in this account specified in
# var.dev_permitted_services.
allow_dev_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_dev_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to this account.
allow_full_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_full_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed read access to
- # the logs in CloudTrail, AWS Config, and CloudWatch for this account. If
+ # A list of IAM ARNs from other AWS accounts that will be allowed read access
+ # to the logs in CloudTrail, AWS Config, and CloudWatch for this account. If
# var.cloudtrail_kms_key_arn is specified, will also be given permissions to
# decrypt with the KMS CMK that is used to encrypt CloudTrail logs.
allow_logs_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed read-only access
- # to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read-only
+ # access to this account.
allow_read_only_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_read_only_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed read access to
- # IAM groups and publish SSH keys. This is used for ssh-grunt.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read access
+ # to IAM groups and publish SSH keys. This is used for ssh-grunt.
allow_ssh_grunt_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed access to AWS
- # support for this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed access to
+ # AWS support for this account.
allow_support_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_support_access_iam_role_permissions_boundary = null
- # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group for
- # doing automated deployments. NOTE: If var.should_create_iam_group_auto_deploy is
- # true, the list must have at least one element (e.g. '*').
+ # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group
+ # for doing automated deployments. NOTE: If
+ # var.should_create_iam_group_auto_deploy is true, the list must have at least
+ # one element (e.g. '*').
auto_deploy_permissions = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
aws_config_iam_role_permissions_boundary = null
# Whether or not to allow kms:DescribeKey to external AWS accounts with write
- # access to the CloudTrail bucket. This is useful during deployment so that you
- # don't have to pass around the KMS key ARN.
+ # access to the CloudTrail bucket. This is useful during deployment so that
+ # you don't have to pass around the KMS key ARN.
cloudtrail_allow_kms_describe_key_to_external_aws_accounts = false
- # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs to.
- # This log group exists in the current account. Set this value to `null` to avoid
- # publishing the trail logs to the logs group. The recommended configuration for
- # CloudTrail is (a) for each child account to aggregate its logs in an S3 bucket
- # in a single central account, such as a logs account and (b) to also store 14
- # days work of logs in CloudWatch in the child account itself for local debugging.
+ # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs
+ # to. This log group exists in the current account. Set this value to `null`
+ # to avoid publishing the trail logs to the logs group. The recommended
+ # configuration for CloudTrail is (a) for each child account to aggregate its
+ # logs in an S3 bucket in a single central account, such as a logs account and
+ # (b) to also store 14 days' worth of logs in CloudWatch in the child account
+ # itself for local debugging.
cloudtrail_cloudwatch_logs_group_name = "cloudtrail-logs"
# If true, logging of data events will be enabled.
cloudtrail_data_logging_enabled = false
- # Specify if you want your event selector to include management events for your
- # trail.
+ # Specify if you want your event selector to include management events for
+ # your trail.
cloudtrail_data_logging_include_management_events = true
- # Specify if you want your trail to log read-only events, write-only events, or
- # all. Possible values are: ReadOnly, WriteOnly, All.
+ # Specify if you want your trail to log read-only events, write-only events,
+ # or all. Possible values are: ReadOnly, WriteOnly, All.
cloudtrail_data_logging_read_write_type = "All"
- # Data resources for which to log data events. This should be a map, where each
- # key is a data resource type, and each value is a list of data resource values.
- # Possible values for data resource types are: AWS::S3::Object,
- # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource' block
- # within the 'event_selector' block of the 'aws_cloudtrail' resource for context:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # trail#data_resource.
+ # Data resources for which to log data events. This should be a map, where
+ # each key is a data resource type, and each value is a list of data resource
+ # values. Possible values for data resource types are: AWS::S3::Object,
+ # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource'
+ # block within the 'event_selector' block of the 'aws_cloudtrail' resource for
+ # context:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#data_resource.
cloudtrail_data_logging_resources = {}
- # Provide a list of AWS account IDs that will be allowed to send CloudTrail logs
- # to this account. This is only required if you are aggregating CloudTrail logs in
- # this account (e.g., this is the logs account) from other accounts.
+ # Provide a list of AWS account IDs that will be allowed to send CloudTrail
+ # logs to this account. This is only required if you are aggregating
+ # CloudTrail logs in this account (e.g., this is the logs account) from other
+ # accounts.
cloudtrail_external_aws_account_ids_with_write_access = []
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
cloudtrail_force_destroy = false
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
cloudtrail_iam_role_permissions_boundary = null
- # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key) that
- # governs access to write API calls older than 7 days and all read API calls. If
- # you are aggregating CloudTrail logs and creating the CMK in this account (e.g.,
- # if this is the logs account), you MUST specify at least one IAM user (or other
- # IAM ARN) that will be given administrator permissions for CMK, including the
- # ability to change who can access this CMK and the extended log data it protects.
- # If you are aggregating CloudTrail logs in another AWS account and the CMK
- # already exists (e.g., if this is the stage or prod account), set this parameter
- # to an empty list.
+ # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. If you are aggregating CloudTrail logs and creating the CMK in this
+ # account (e.g., if this is the logs account), you MUST specify at least one
+ # IAM user (or other IAM ARN) that will be given administrator permissions for
+ # CMK, including the ability to change who can access this CMK and the
+ # extended log data it protects. If you are aggregating CloudTrail logs in
+ # another AWS account and the CMK already exists (e.g., if this is the stage
+ # or prod account), set this parameter to an empty list.
cloudtrail_kms_key_administrator_iam_arns = []
- # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key) that
- # governs access to write API calls older than 7 days and all read API calls. If
- # that CMK already exists (e.g., if this is the stage or prod account and you want
- # to use a CMK that already exists in the logs account), set this to the ARN of
- # that CMK. Otherwise (e.g., if this is the logs account), set this to null, and a
- # new CMK will be created.
+ # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. If that CMK already exists (e.g., if this is the stage or prod
+ # account and you want to use a CMK that already exists in the logs account),
+ # set this to the ARN of that CMK. Otherwise (e.g., if this is the logs
+ # account), set this to null, and a new CMK will be created.
cloudtrail_kms_key_arn = null
- # If the kms_key_arn provided is an alias or alias ARN, then this must be set to
- # true so that the module will exchange the alias for a CMK ARN. Setting this to
- # true and using aliases requires
- # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be true
- # for multi-account scenarios.
+ # If the kms_key_arn provided is an alias or alias ARN, then this must be set
+ # to true so that the module will exchange the alias for a CMK ARN. Setting
+ # this to true and using aliases requires
+ # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be
+ # true for multi-account scenarios.
cloudtrail_kms_key_arn_is_alias = false
- # Additional service principals beyond CloudTrail that should have access to the
- # KMS key used to encrypt the logs. This is useful for granting access to the logs
- # for the purposes of constructing metric filters.
+ # Additional service principals beyond CloudTrail that should have access to
+ # the KMS key used to encrypt the logs. This is useful for granting access to
+ # the logs for the purposes of constructing metric filters.
cloudtrail_kms_key_service_principals = []
- # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key) that
- # governs access to write API calls older than 7 days and all read API calls. If
- # you are aggregating CloudTrail logs and creating the CMK in this account (e.g.,
- # this is the logs account), you MUST specify at least one IAM user (or other IAM
- # ARN) that will be given user access to this CMK, which will allow this user to
- # read CloudTrail Logs. If you are aggregating CloudTrail logs in another AWS
- # account and the CMK already exists, set this parameter to an empty list (e.g.,
- # if this is the stage or prod account).
+ # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. If you are aggregating CloudTrail logs and creating the CMK in this
+ # account (e.g., this is the logs account), you MUST specify at least one IAM
+ # user (or other IAM ARN) that will be given user access to this CMK, which
+ # will allow this user to read CloudTrail Logs. If you are aggregating
+ # CloudTrail logs in another AWS account and the CMK already exists, set this
+ # parameter to an empty list (e.g., if this is the stage or prod account).
cloudtrail_kms_key_user_iam_arns = []
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
cloudtrail_num_days_after_which_archive_log_data = 30
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
cloudtrail_num_days_after_which_delete_log_data = 365
- # After this number of days, logs stored in CloudWatch will be deleted. Possible
- # values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827,
- # 3653, and 0 (default). When set to 0, logs will be retained indefinitely.
+ # After this number of days, logs stored in CloudWatch will be deleted.
+ # Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400,
+ # 545, 731, 1827, 3653, and 0 (default). When set to 0, logs will be retained
+ # indefinitely.
cloudtrail_num_days_to_retain_cloudwatch_logs = 0
# Set to false to create an S3 bucket of name var.cloudtrail_s3_bucket_name in
- # this account for storing CloudTrail logs (e.g., if this is the logs account).
- # Set to true to assume the bucket specified in var.cloudtrail_s3_bucket_name
- # already exists in another AWS account (e.g., if this is the stage or prod
- # account and var.cloudtrail_s3_bucket_name is the name of a bucket in the logs
- # account).
+ # this account for storing CloudTrail logs (e.g., if this is the logs
+ # account). Set to true to assume the bucket specified in
+ # var.cloudtrail_s3_bucket_name already exists in another AWS account (e.g.,
+ # if this is the stage or prod account and var.cloudtrail_s3_bucket_name is
+ # the name of a bucket in the logs account).
cloudtrail_s3_bucket_already_exists = true
# Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS.
cloudtrail_s3_bucket_key_enabled = false
- # The name of the S3 Bucket where CloudTrail logs will be stored. This could be a
- # bucket in this AWS account (e.g., if this is the logs account) or the name of a
- # bucket in another AWS account where logs should be sent (e.g., if this is the
- # stage or prod account and you're specifying the name of a bucket in the logs
- # account).
+ # The name of the S3 Bucket where CloudTrail logs will be stored. This could
+ # be a bucket in this AWS account (e.g., if this is the logs account) or the
+ # name of a bucket in another AWS account where logs should be sent (e.g., if
+ # this is the stage or prod account and you're specifying the name of a bucket
+ # in the logs account).
cloudtrail_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage Cloudtrail data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store CloudTrail data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README from
+ # the terraform-aws-security/private-s3-bucket module.
cloudtrail_s3_mfa_delete = false
# Tags to apply to the CloudTrail resources.
cloudtrail_tags = {}
# Set to true to send the AWS Config data to another account (e.g., a logs
- # account) for aggregation purposes. You must set the ID of that other account via
- # the config_central_account_id variable. This redundant variable has to exist
- # because Terraform does not allow computed data in count and for_each parameters
- # and var.config_central_account_id may be computed if its the ID of a
- # newly-created AWS account.
+ # account) for aggregation purposes. You must set the ID of that other account
+ # via the config_central_account_id variable. This redundant variable has to
+ # exist because Terraform does not allow computed data in count and for_each
+ # parameters and var.config_central_account_id may be computed if it's the ID
+ # of a newly-created AWS account.
config_aggregate_config_data_in_external_account = false
# If the S3 bucket and SNS topics used for AWS Config live in a different AWS
- # account, set this variable to the ID of that account (e.g., if this is the stage
- # or prod account, set this to the ID of the logs account). If the S3 bucket and
- # SNS topics live in this account (e.g., this is the logs account), set this
- # variable to null. Only used if
+ # account, set this variable to the ID of that account (e.g., if this is the
+ # stage or prod account, set this to the ID of the logs account). If the S3
+ # bucket and SNS topics live in this account (e.g., this is the logs account),
+ # set this variable to null. Only used if
# var.config_aggregate_config_data_in_external_account is true.
config_central_account_id = null
- # Set to true to create AWS Config rules directly in this account. Set false to
- # not create any Config rules in this account (i.e., if you created the rules at
- # the organization level already). We recommend setting this to true to use
- # account-level rules because org-level rules create a chicken-and-egg problem
- # with creating new accounts.
+ # Set to true to create AWS Config rules directly in this account. Set false
+ # to not create any Config rules in this account (i.e., if you created the
+ # rules at the organization level already). We recommend setting this to true
+ # to use account-level rules because org-level rules create a chicken-and-egg
+ # problem with creating new accounts.
config_create_account_rules = true
# Optional KMS key to use for encrypting S3 objects on the AWS Config delivery
- # channel for an externally managed S3 bucket. This must belong to the same region
- # as the destination S3 bucket. If null, AWS Config will default to encrypting the
- # delivered data with AES-256 encryption. Only used if var.should_create_s3_bucket
- # is false - otherwise, var.config_s3_bucket_kms_key_arn is used.
+ # channel for an externally managed S3 bucket. This must belong to the same
+ # region as the destination S3 bucket. If null, AWS Config will default to
+ # encrypting the delivered data with AES-256 encryption. Only used if
+ # var.should_create_s3_bucket is false - otherwise,
+ # var.config_s3_bucket_kms_key_arn is used.
config_delivery_channel_kms_key_arn = null
- # Same as var.config_delivery_channel_kms_key_arn, except the value is a name of a
- # KMS key configured with var.kms_customer_master_keys. The module created KMS key
- # for the delivery region (indexed by the name) will be used. Note that if both
- # var.config_delivery_channel_kms_key_arn and
+ # Same as var.config_delivery_channel_kms_key_arn, except the value is a name
+ # of a KMS key configured with var.kms_customer_master_keys. The module
+ # created KMS key for the delivery region (indexed by the name) will be used.
+ # Note that if both var.config_delivery_channel_kms_key_arn and
# var.config_delivery_channel_kms_key_by_name are configured, the key in
# var.config_delivery_channel_kms_key_arn will always be used.
config_delivery_channel_kms_key_by_name = null
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
config_force_destroy = false
- # Provide a list of AWS account IDs that will be allowed to send AWS Config data
- # to this account. This is only required if you are aggregating config data in
- # this account (e.g., this is the logs account) from other accounts.
+ # Provide a list of AWS account IDs that will be allowed to send AWS Config
+ # data to this account. This is only required if you are aggregating config
+ # data in this account (e.g., this is the logs account) from other accounts.
config_linked_accounts = []
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
config_num_days_after_which_archive_log_data = 365
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Optional KMS key to use for encrypting S3 objects on the AWS Config bucket, when
- # the S3 bucket is created within this module (var.config_should_create_s3_bucket
- # is true). For encrypting S3 objects on delivery for an externally managed S3
- # bucket, refer to the var.config_delivery_channel_kms_key_arn input variable. If
- # null, data in S3 will be encrypted using the default aws/s3 key. If provided,
- # the key policy of the provided key must permit the IAM role used by AWS Config.
- # See https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that
+ # Optional KMS key to use for encrypting S3 objects on the AWS Config bucket,
+ # when the S3 bucket is created within this module
+ # (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
+ # delivery for an externally managed S3 bucket, refer to the
+ # var.config_delivery_channel_kms_key_arn input variable. If null, data in S3
+ # will be encrypted using the default aws/s3 key. If provided, the key policy
+ # of the provided key must permit the IAM role used by AWS Config. See
+ # https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that
# the KMS key must reside in the global recorder region (as configured by
# var.aws_region).
config_s3_bucket_kms_key_arn = null
- # Same as var.config_s3_bucket_kms_key_arn, except the value is a name of a KMS
- # key configured with var.kms_customer_master_keys. The module created KMS key for
- # the global recorder region (indexed by the name) will be used. Note that if both
- # var.config_s3_bucket_kms_key_arn and var.config_s3_bucket_kms_key_by_name are
- # configured, the key in var.config_s3_bucket_kms_key_arn will always be used.
+ # Same as var.config_s3_bucket_kms_key_arn, except the value is a name of a
+ # KMS key configured with var.kms_customer_master_keys. The module created KMS
+ # key for the global recorder region (indexed by the name) will be used. Note
+ # that if both var.config_s3_bucket_kms_key_arn and
+ # var.config_s3_bucket_kms_key_by_name are configured, the key in
+ # var.config_s3_bucket_kms_key_arn will always be used.
config_s3_bucket_kms_key_by_name = null
- # The name of the S3 Bucket where Config items will be stored. Can be in the same
- # account or in another account.
+ # The name of the S3 Bucket where Config items will be stored. Can be in the
+ # same account or in another account.
config_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage AWS Config data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store AWS Config data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README from
+ # the terraform-aws-security/private-s3-bucket module.
config_s3_mfa_delete = false
# Set to true to create an S3 bucket of name var.config_s3_bucket_name in this
- # account for storing AWS Config data (e.g., if this is the logs account). Set to
- # false to assume the bucket specified in var.config_s3_bucket_name already exists
- # in another AWS account (e.g., if this is the stage or prod account and
- # var.config_s3_bucket_name is the name of a bucket in the logs account).
+ # account for storing AWS Config data (e.g., if this is the logs account). Set
+ # to false to assume the bucket specified in var.config_s3_bucket_name already
+ # exists in another AWS account (e.g., if this is the stage or prod account
+ # and var.config_s3_bucket_name is the name of a bucket in the logs account).
config_should_create_s3_bucket = false
# set to true to create an sns topic in this account for sending aws config
- # notifications (e.g., if this is the logs account). set to false to assume the
- # topic specified in var.config_sns_topic_name already exists in another aws
- # account (e.g., if this is the stage or prod account and
+ # notifications (e.g., if this is the logs account). set to false to assume
+ # the topic specified in var.config_sns_topic_name already exists in another
+ # aws account (e.g., if this is the stage or prod account and
# var.config_sns_topic_name is the name of an sns topic in the logs account).
config_should_create_sns_topic = false
- # Same as var.config_sns_topic_kms_key_region_map, except the value is a name of a
- # KMS key configured with var.kms_customer_master_keys. The module created KMS key
- # for each region (indexed by the name) will be used. Note that if an entry exists
- # for a region in both var.config_sns_topic_kms_key_region_map and
+ # Same as var.config_sns_topic_kms_key_region_map, except the value is a name
+ # of a KMS key configured with var.kms_customer_master_keys. The module
+ # created KMS key for each region (indexed by the name) will be used. Note
+ # that if an entry exists for a region in both
+ # var.config_sns_topic_kms_key_region_map and
# var.config_sns_topic_kms_key_by_name_region_map, then the key in
# var.config_sns_topic_kms_key_region_map will always be used.
config_sns_topic_kms_key_by_name_region_map = null
- # Optional KMS key to use for each region for configuring default encryption for
- # the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of KMS
- # key). If null or the region key is missing, encryption will not be configured
- # for the SNS topic in that region.
+ # Optional KMS key to use for each region for configuring default encryption
+ # for the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of
+ # KMS key). If null or the region key is missing, encryption will not be
+ # configured for the SNS topic in that region.
config_sns_topic_kms_key_region_map = null
- # the name of the sns topic in where aws config notifications will be sent. can be
- # in the same account or in another account.
+ # the name of the sns topic where aws config notifications will be sent.
+ # can be in the same account or in another account.
config_sns_topic_name = "ConfigTopic"
- # A map of tags to apply to the S3 Bucket. The key is the tag name and the value
- # is the tag value.
+ # A map of tags to apply to the S3 Bucket. The key is the tag name and the
+ # value is the tag value.
config_tags = {}
- # The maximum frequency with which AWS Config runs evaluations for the ´PERIODIC´
- # rules. See
- # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.h
- # ml#maximum_execution_frequency
+ # The maximum frequency with which AWS Config runs evaluations for the
+ # ´PERIODIC´ rules. See
+ # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.html#maximum_execution_frequency
configrules_maximum_execution_frequency = "TwentyFour_Hours"
# A custom name to use for the Cloudtrail Trail. If null, defaults to the
@@ -1173,36 +1193,36 @@ inputs = {
custom_cloudtrail_trail_name = null
# A list of AWS services for which the developers from the accounts in
- # var.allow_dev_access_from_other_account_arns will receive full permissions. See
- # https://goo.gl/ZyoHlz to find the IAM Service name. For example, to grant
- # developers access only to EC2 and Amazon Machine Learning, use the value
- # ["ec2","machinelearning"]. Do NOT add iam to the list of services, or that will
- # grant Developers de facto admin access.
+ # var.allow_dev_access_from_other_account_arns will receive full permissions.
+ # See https://goo.gl/ZyoHlz to find the IAM Service name. For example, to
+ # grant developers access only to EC2 and Amazon Machine Learning, use the
+ # value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
+ # that will grant Developers de facto admin access.
dev_permitted_services = []
- # If set to true (default), all new EBS volumes will have encryption enabled by
- # default
+ # If set to true (default), all new EBS volumes will have encryption enabled
+ # by default
ebs_enable_encryption = true
# The name of the KMS CMK to use by default for encrypting EBS volumes, if
- # var.enable_encryption and var.use_existing_kms_keys are enabled. The name must
- # match the name given the var.kms_customer_master_keys variable.
+ # var.enable_encryption and var.use_existing_kms_keys are enabled. The name
+ # must match the name given in the var.kms_customer_master_keys variable.
ebs_kms_key_name = ""
# If set to true, the KMS Customer Managed Keys (CMK) with the name in
- # var.ebs_kms_key_name will be set as the default for EBS encryption. When false
- # (default), the AWS-managed aws/ebs key will be used.
+ # var.ebs_kms_key_name will be set as the default for EBS encryption. When
+ # false (default), the AWS-managed aws/ebs key will be used.
ebs_use_existing_kms_keys = false
- # Set to true (default) to enable CloudTrail in this app account. Set to false to
- # disable CloudTrail (note: all other CloudTrail variables will be ignored). Note
- # that if you have enabled organization trail in the root (parent) account, you
- # should set this to false; the organization trail will enable CloudTrail on child
- # accounts by default.
+ # Set to true (default) to enable CloudTrail in this app account. Set to false
+ # to disable CloudTrail (note: all other CloudTrail variables will be
+ # ignored). Note that if you have enabled organization trail in the root
+ # (parent) account, you should set this to false; the organization trail will
+ # enable CloudTrail on child accounts by default.
enable_cloudtrail = true
- # Set to true to enable AWS Config in this app account. Set to false to disable
- # AWS Config (note: all other AWS config variables will be ignored).
+ # Set to true to enable AWS Config in this app account. Set to false to
+ # disable AWS Config (note: all other AWS config variables will be ignored).
enable_config = true
# Checks whether the EBS volumes that are in an attached state are encrypted.
@@ -1210,15 +1230,15 @@ inputs = {
# When true, create an Open ID Connect Provider that GitHub actions can use to
# assume IAM roles in the account. Refer to
- # https://docs.github.com/en/actions/deployment/security-hardening-your-deployment
- # /configuring-openid-connect-in-amazon-web-services for more information.
+ # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services
+ # for more information.
enable_github_actions_access = false
- # Set to true (default) to enable GuardDuty in this app account. Set to false to
- # disable GuardDuty (note: all other GuardDuty variables will be ignored). Note
- # that if you have enabled organization level GuardDuty in the root (parent)
- # account, you should set this to false; the organization GuardDuty will enable
- # GuardDuty on child accounts by default.
+ # Set to true (default) to enable GuardDuty in this app account. Set to false
+ # to disable GuardDuty (note: all other GuardDuty variables will be ignored).
+ # Note that if you have enabled organization level GuardDuty in the root
+ # (parent) account, you should set this to false; the organization GuardDuty
+ # will enable GuardDuty on child accounts by default.
enable_guardduty = true
# A feature flag to enable or disable this module.
@@ -1236,15 +1256,15 @@ inputs = {
# Password Policy variables will be ignored).
enable_iam_user_password_policy = true
- # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual Private
- # Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
+ # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual
+ # Private Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
enable_insecure_sg_rules = true
# Checks whether storage encryption is enabled for your RDS DB instances.
enable_rds_storage_encrypted = true
- # Checks whether users of your AWS account require a multi-factor authentication
- # (MFA) device to sign in with root credentials.
+ # Checks whether users of your AWS account require a multi-factor
+ # authentication (MFA) device to sign in with root credentials.
enable_root_account_mfa = true
# Checks that your Amazon S3 buckets do not allow public read access.
@@ -1257,11 +1277,11 @@ inputs = {
# configuring the encrypted volumes config rule.
encrypted_volumes_kms_id = null
- # When set, use the statically provided hardcoded list of thumbprints rather than
- # looking it up dynamically. This is useful if you want to trade reliability of
- # the OpenID Connect Provider across certificate renewals with a static list that
- # is obtained using a trustworthy mechanism, to mitigate potential damage from a
- # domain hijacking attack on GitHub domains.
+ # When set, use the statically provided hardcoded list of thumbprints rather
+ # than looking it up dynamically. This is useful if you want to trade
+ # reliability of the OpenID Connect Provider across certificate renewals with
+ # a static list that is obtained using a trustworthy mechanism, to mitigate
+ # potential damage from a domain hijacking attack on GitHub domains.
github_actions_openid_connect_provider_thumbprint_list = null
# Name of the Cloudwatch event rules.
@@ -1270,9 +1290,9 @@ inputs = {
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
- # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must be
- # configured in Terraform to enable drift detection. Valid values for standalone
- # and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
+ # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must
+ # be configured in Terraform to enable drift detection. Valid values for
+ # standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
# Specifies a name for the created SNS topics where findings are published.
@@ -1285,9 +1305,9 @@ inputs = {
# The name of the IAM Access Analyzer module
iam_access_analyzer_name = "baseline_app-iam_access_analyzer"
- # If set to ORGANIZATION, the analyzer will be scanning the current organization
- # and any policies that refer to linked resources such as S3, IAM, Lambda and SQS
- # policies.
+ # If set to ORGANIZATION, the analyzer will be scanning the current
+ # organization and any policies that refer to linked resources such as S3,
+ # IAM, Lambda and SQS policies.
iam_access_analyzer_type = "ORGANIZATION"
# Allow users to change their own password.
@@ -1320,50 +1340,52 @@ inputs = {
# The tags to apply to all the IAM role resources.
iam_role_tags = {}
- # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '443,1020-1025'.
+ # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '443,1020-1025'.
insecure_sg_rules_authorized_tcp_ports = "443"
- # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '500,1020-1025'.
+ # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '500,1020-1025'.
insecure_sg_rules_authorized_udp_ports = null
- # A map of tags to apply to all KMS Keys to be created. In this map variable, the
- # key is the tag name and the value is the tag value.
+ # A map of tags to apply to all KMS Keys to be created. In this map variable,
+ # the key is the tag name and the value is the tag value.
kms_cmk_global_tags = {}
# You can use this variable to create account-level KMS Customer Master Keys
- # (CMKs) for encrypting and decrypting data. This variable should be a map where
- # the keys are the names of the CMK and the values are an object that defines the
- # configuration for that CMK. See the comment below for the configuration options
- # you can set for each key.
+ # (CMKs) for encrypting and decrypting data. This variable should be a map
+ # where the keys are the names of the CMK and the values are an object that
+ # defines the configuration for that CMK. See the comment below for the
+ # configuration options you can set for each key.
kms_customer_master_keys = {}
# The map of names of KMS grants to the region where the key resides in. There
- # should be a one to one mapping between entries in this map and the entries of
- # the kms_grants map. This is used to workaround a terraform limitation where the
- # for_each value can not depend on resources.
+ # should be a one to one mapping between entries in this map and the entries
+ # of the kms_grants map. This is used to workaround a terraform limitation
+ # where the for_each value can not depend on resources.
kms_grant_regions = {}
# Create the specified KMS grants to allow entities to use the KMS key without
- # modifying the KMS policy or IAM. This is necessary to allow AWS services (e.g.
- # ASG) to use CMKs encrypt and decrypt resources. The input is a map of grant name
- # to grant properties. The name must be unique per account.
+ # modifying the KMS policy or IAM. This is necessary to allow AWS services
+ # (e.g. ASG) to use CMKs to encrypt and decrypt resources. The input is a map of
+ # grant name to grant properties. The name must be unique per account.
kms_grants = {}
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to all
- # IAM roles created by this module that are intended for people to use, such as
- # allow-read-only-access-from-other-accounts. For IAM roles that are intended for
- # machine users, such as allow-auto-deploy-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for people
+ # to use, such as allow-read-only-access-from-other-accounts. For IAM roles
+ # that are intended for machine users, such as
+ # allow-auto-deploy-from-other-accounts, see
# var.max_session_duration_machine_users.
max_session_duration_human_users = 43200
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to
- # all IAM roles created by this module that are intended for machine users, such
- # as allow-auto-deploy-from-other-accounts. For IAM roles that are intended for
- # human users, such as allow-read-only-access-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for
+ # machine users, such as allow-auto-deploy-from-other-accounts. For IAM roles
+ # that are intended for human users, such as
+ # allow-read-only-access-from-other-accounts, see
# var.max_session_duration_human_users.
max_session_duration_machine_users = 3600
@@ -1372,26 +1394,26 @@ inputs = {
rds_storage_encrypted_kms_id = null
# Create service-linked roles for this set of services. You should pass in the
- # URLs of the services, but without the protocol (e.g., http://) in front: e.g.,
- # use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or es.amazonaws.com for
- # Amazon Elasticsearch. Service-linked roles are predefined by the service, can
- # typically only be assumed by that service, and include all the permissions that
- # the service requires to call other AWS services on your behalf. You can
- # typically only create one such role per AWS account, which is why this parameter
- # exists in the account baseline. See
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-wor
- # -with-iam.html for the list of services that support service-linked roles.
+ # URLs of the services, but without the protocol (e.g., http://) in front:
+ # e.g., use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or
+ # es.amazonaws.com for Amazon Elasticsearch. Service-linked roles are
+ # predefined by the service, can typically only be assumed by that service,
+ # and include all the permissions that the service requires to call other AWS
+ # services on your behalf. You can typically only create one such role per AWS
+ # account, which is why this parameter exists in the account baseline. See
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html
+ # for the list of services that support service-linked roles.
service_linked_roles = []
- # Should we require that all IAM Users use Multi-Factor Authentication for both
- # AWS API calls and the AWS Web Console? (true or false)
+ # Should we require that all IAM Users use Multi-Factor Authentication for
+ # both AWS API calls and the AWS Web Console? (true or false)
should_require_mfa = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
}
@@ -3087,11 +3109,11 @@ A map of ARNs of the service linked roles created from
diff --git a/docs/reference/services/landing-zone/aws-root-account-baseline-wrapper.md b/docs/reference/services/landing-zone/aws-root-account-baseline-wrapper.md
index 84691b20e4..658c542997 100644
--- a/docs/reference/services/landing-zone/aws-root-account-baseline-wrapper.md
+++ b/docs/reference/services/landing-zone/aws-root-account-baseline-wrapper.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Account Baseline for root account
- View Source
+View Source
Release Notes
@@ -58,16 +58,16 @@ If you’ve never used the Service Catalog before, make sure to read
* Learn more about each individual module, click the link in the [Features](#features) section
* [How to configure a production-grade AWS account structure](https://docs.gruntwork.io/guides/build-it-yourself/landing-zone/)
-* [How to create child accounts](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/core-concepts.md#creating-child-accounts)
-* [How to aggregate AWS Config and CloudTrail data in a logs account](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/core-concepts.md#aggregating-aws-config-and-cloudtrail-data-in-a-logs-account)
-* [Why does this module use account-level AWS Config Rules?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/core-concepts.md#why-does-this-module-use-account-level-aws-config-rules)
-* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/core-concepts.md#how-to-use-multi-region-services)
+* [How to create child accounts](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/core-concepts.md#creating-child-accounts)
+* [How to aggregate AWS Config and CloudTrail data in a logs account](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/core-concepts.md#aggregating-aws-config-and-cloudtrail-data-in-a-logs-account)
+* [Why does this module use account-level AWS Config Rules?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/core-concepts.md#why-does-this-module-use-account-level-aws-config-rules)
+* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/core-concepts.md#how-to-use-multi-region-services)
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -75,7 +75,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing/landingzone): The
+* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing/landingzone): The
`examples/for-learning-and-testing/landingzone` folder contains standalone sample code optimized for learning,
experimenting, and testing (but not direct production usage).
@@ -83,7 +83,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end integrated tech stack on top of the Gruntwork Service Catalog.
@@ -104,7 +104,7 @@ If you want to deploy this repo in production, check out the following resources
module "account_baseline_root" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-root?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-root?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -118,82 +118,84 @@ module "account_baseline_root" {
# GuardDuty.
aws_region =
- # Map of child accounts to create. The map key is the name of the account and the
- # value is an object containing account configuration variables. See the comments
- # below for what keys and values this object should contain.
+ # Map of child accounts to create. The map key is the name of the account and
+ # the value is an object containing account configuration variables. See the
+ # comments below for what keys and values this object should contain.
child_accounts =
- # Creates resources in the specified regions. The best practice is to enable AWS
- # Config in all enabled regions in your AWS account. This variable must NOT be set
- # to null or empty. Otherwise, we won't know which regions to use and authenticate
- # to, and may use some not enabled in your AWS account (e.g., GovCloud, China,
- # etc). To get the list of regions enabled in your AWS account, you can use the
- # AWS CLI: aws ec2 describe-regions.
+ # Creates resources in the specified regions. The best practice is to enable
+ # AWS Config in all enabled regions in your AWS account. This variable must
+ # NOT be set to null or empty. Otherwise, we won't know which regions to use
+ # and authenticate to, and may use some not enabled in your AWS account (e.g.,
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions.
config_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable EBS
- # Encryption in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # EBS Encryption in all enabled regions in your AWS account. This variable
+ # must NOT be set to null or empty. Otherwise, we won't know which regions to
+ # use and authenticate to, and may use some not enabled in your AWS account
+ # (e.g., GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
ebs_opt_in_regions =
# Creates resources in the specified regions. The best practice is to enable
- # GuardDuty in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
+ # GuardDuty in all enabled regions in your AWS account. This variable must NOT
+ # be set to null or empty. Otherwise, we won't know which regions to use and
# authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
guardduty_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable IAM
- # Access Analyzer in all enabled regions in your AWS account. This variable must
- # NOT be set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # IAM Access Analyzer in all enabled regions in your AWS account. This
+ # variable must NOT be set to null or empty. Otherwise, we won't know which
+ # regions to use and authenticate to, and may use some not enabled in your AWS
+ # account (e.g., GovCloud, China, etc). To get the list of regions enabled in
+ # your AWS account, you can use the AWS CLI: aws ec2 describe-regions. The
+ # value provided for global_recorder_region must be in this list.
iam_access_analyzer_opt_in_regions =
- # The name used to prefix AWS Config and Cloudtrail resources, including the S3
- # bucket names and SNS topics used for each.
+ # The name used to prefix AWS Config and Cloudtrail resources, including the
+ # S3 bucket names and SNS topics used for each.
name_prefix =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Map of additional managed rules to add. The key is the name of the rule (e.g.
- # ´acm-certificate-expiration-check´) and the value is an object specifying the
- # rule details
+ # Map of additional managed rules to add. The key is the name of the rule
+ # (e.g. 'acm-certificate-expiration-check') and the value is an object
+ # specifying the rule details
additional_config_rules = {}
- # Map of github repositories to the list of branches that are allowed to assume
- # the IAM role. The repository should be encoded as org/repo-name (e.g.,
- # gruntwork-io/terrraform-aws-ci). Allows GitHub Actions to assume the auto deploy
- # IAM role using an OpenID Connect Provider for the given repositories. Refer to
- # the docs for github-actions-iam-role for more information. Note that this is
- # mutually exclusive with var.allow_auto_deploy_from_other_account_arns. Only used
- # if var.enable_github_actions_access is true.
+ # Map of github repositories to the list of branches that are allowed to
+ # assume the IAM role. The repository should be encoded as org/repo-name
+ # (e.g., gruntwork-io/terraform-aws-ci). Allows GitHub Actions to assume the
+ # auto deploy IAM role using an OpenID Connect Provider for the given
+ # repositories. Refer to the docs for github-actions-iam-role for more
+ # information. Note that this is mutually exclusive with
+ # var.allow_auto_deploy_from_other_account_arns. Only used if
+ # var.enable_github_actions_access is true.
allow_auto_deploy_from_github_actions_for_sources = {}
- # A list of IAM ARNs from other AWS accounts that will be allowed to assume the
- # auto deploy IAM role that has the permissions in var.auto_deploy_permissions.
+ # A list of IAM ARNs from other AWS accounts that will be allowed to assume
+ # the auto deploy IAM role that has the permissions in
+ # var.auto_deploy_permissions.
allow_auto_deploy_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_auto_deploy_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the billing info for this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the billing info for this account.
allow_billing_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_billing_access_iam_role_permissions_boundary = null
# If true, an IAM Policy that grants access to CloudTrail will be honored. If
@@ -201,374 +203,380 @@ module "account_baseline_root" {
# CloudTrail and any IAM Policy grants will be ignored. (true or false)
allow_cloudtrail_access_with_iam = true
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the services in this account specified in
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the services in this account specified in
# var.dev_permitted_services.
allow_dev_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_dev_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to this account.
allow_full_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_full_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed read access to
- # the logs in CloudTrail, AWS Config, and CloudWatch for this account. If
+ # A list of IAM ARNs from other AWS accounts that will be allowed read access
+ # to the logs in CloudTrail, AWS Config, and CloudWatch for this account. If
# var.cloudtrail_kms_key_arn is specified, will also be given permissions to
# decrypt with the KMS CMK that is used to encrypt CloudTrail logs.
allow_logs_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed read-only access
- # to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read-only
+ # access to this account.
allow_read_only_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_read_only_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed read access to
- # IAM groups and publish SSH keys. This is used for ssh-grunt.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read access
+ # to IAM groups and publish SSH keys. This is used for ssh-grunt.
allow_ssh_grunt_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed access to AWS
- # support for this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed access to
+ # AWS support for this account.
allow_support_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_support_access_iam_role_permissions_boundary = null
- # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group for
- # doing automated deployments. NOTE: If var.should_create_iam_group_auto_deploy is
- # true, the list must have at least one element (e.g. '*').
+ # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group
+ # for doing automated deployments. NOTE: If
+ # var.should_create_iam_group_auto_deploy is true, the list must have at least
+ # one element (e.g. '*').
auto_deploy_permissions = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
aws_config_iam_role_permissions_boundary = null
- # Additional IAM policies to apply to cloudtrail S3 bucket. You can use this to
- # grant read/write access beyond what is provided to Cloudtrail. This should be a
- # map, where each key is a unique statement ID (SID), and each value is an object
- # that contains the parameters defined in the comment below.
+ # Additional IAM policies to apply to cloudtrail S3 bucket. You can use this
+ # to grant read/write access beyond what is provided to Cloudtrail. This
+ # should be a map, where each key is a unique statement ID (SID), and each
+ # value is an object that contains the parameters defined in the comment
+ # below.
cloudtrail_additional_bucket_policy_statements = null
- # Map of advanced event selector name to list of field selectors to apply for that
- # event selector. Advanced event selectors allow for more fine grained data
- # logging of events.
+ # Map of advanced event selector name to list of field selectors to apply for
+ # that event selector. Advanced event selectors allow for more fine grained
+ # data logging of events.
Note that you can not configure basic data logging
# (var.cloudtrail_data_logging_enabled) if advanced event logging is
# enabled.
-Refer to the AWS docs on data event selection for more details on the
- # difference between basic data logging and advanced data logging.
-
+Refer to the AWS docs on data event selection for more details on
+ # the difference between basic data logging and advanced data logging.
cloudtrail_advanced_event_selectors = {}
# Whether or not to allow kms:DescribeKey to external AWS accounts with write
- # access to the CloudTrail bucket. This is useful during deployment so that you
- # don't have to pass around the KMS key ARN.
+ # access to the CloudTrail bucket. This is useful during deployment so that
+ # you don't have to pass around the KMS key ARN.
cloudtrail_allow_kms_describe_key_to_external_aws_accounts = false
- # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs to.
- # This log group exists in the current account. Set this value to `null` to avoid
- # publishing the trail logs to the logs group. The recommended configuration for
- # CloudTrail is (a) for each child account to aggregate its logs in an S3 bucket
- # in a single central account, such as a logs account and (b) to also store 14
- # days work of logs in CloudWatch in the child account itself for local debugging.
+ # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs
+ # to. This log group exists in the current account. Set this value to `null`
+ # to avoid publishing the trail logs to the logs group. The recommended
+ # configuration for CloudTrail is (a) for each child account to aggregate its
+ # logs in an S3 bucket in a single central account, such as a logs account and
+ # (b) to also store 14 days' worth of logs in CloudWatch in the child account
+ # itself for local debugging.
cloudtrail_cloudwatch_logs_group_name = "cloudtrail-logs"
# If true, logging of data events will be enabled.
cloudtrail_data_logging_enabled = false
- # Specify if you want your event selector to include management events for your
- # trail.
+ # Specify if you want your event selector to include management events for
+ # your trail.
cloudtrail_data_logging_include_management_events = true
- # Specify if you want your trail to log read-only events, write-only events, or
- # all. Possible values are: ReadOnly, WriteOnly, All.
+ # Specify if you want your trail to log read-only events, write-only events,
+ # or all. Possible values are: ReadOnly, WriteOnly, All.
cloudtrail_data_logging_read_write_type = "All"
- # Data resources for which to log data events. This should be a map, where each
- # key is a data resource type, and each value is a list of data resource values.
- # Possible values for data resource types are: AWS::S3::Object,
- # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource' block
- # within the 'event_selector' block of the 'aws_cloudtrail' resource for context:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # trail#data_resource.
+ # Data resources for which to log data events. This should be a map, where
+ # each key is a data resource type, and each value is a list of data resource
+ # values. Possible values for data resource types are: AWS::S3::Object,
+ # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource'
+ # block within the 'event_selector' block of the 'aws_cloudtrail' resource for
+ # context:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#data_resource.
cloudtrail_data_logging_resources = {}
- # Whether or not to enable automatic annual rotation of the KMS key. Defaults to
- # true.
+ # Whether or not to enable automatic annual rotation of the KMS key. Defaults
+ # to true.
cloudtrail_enable_key_rotation = true
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
cloudtrail_force_destroy = false
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
cloudtrail_iam_role_permissions_boundary = null
- # Specifies whether the trail is an AWS Organizations trail. Organization trails
- # log events for the root account and all member accounts. Can only be created in
- # the organization root account. (true or false)
+ # Specifies whether the trail is an AWS Organizations trail. Organization
+ # trails log events for the root account and all member accounts. Can only be
+ # created in the organization root account. (true or false)
cloudtrail_is_organization_trail = false
# All CloudTrail Logs will be encrypted with a KMS Key (a Customer Master Key)
- # that governs access to write API calls older than 7 days and all read API calls.
- # The IAM Users specified in this list will have rights to change who can access
- # this extended log data. Note that if you specify a logs account (by setting
- # is_logs_account = true on one of the accounts in var.child_accounts), the KMS
- # CMK will be created in that account, and the root of that account will
- # automatically be made an admin of the CMK.
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. The IAM Users specified in this list will have rights to change who
+ # can access this extended log data. Note that if you specify a logs account
+ # (by setting is_logs_account = true on one of the accounts in
+ # var.child_accounts), the KMS CMK will be created in that account, and the
+ # root of that account will automatically be made an admin of the CMK.
cloudtrail_kms_key_administrator_iam_arns = []
- # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key) that
- # governs access to write API calls older than 7 days and all read API calls. If
- # that CMK already exists, set this to the ARN of that CMK. Otherwise, set this to
- # null, and a new CMK will be created. If you set is_logs_account to true on one
- # of the accounts in var.child_accounts, the KMS CMK will be created in that
- # account (this is the recommended approach!).
+ # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. If that CMK already exists, set this to the ARN of that CMK.
+ # Otherwise, set this to null, and a new CMK will be created. If you set
+ # is_logs_account to true on one of the accounts in var.child_accounts, the
+ # KMS CMK will be created in that account (this is the recommended approach!).
cloudtrail_kms_key_arn = null
- # If the kms_key_arn provided is an alias or alias ARN, then this must be set to
- # true so that the module will exchange the alias for a CMK ARN. Setting this to
- # true and using aliases requires
- # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be true
- # for multi-account scenarios.
+ # If the kms_key_arn provided is an alias or alias ARN, then this must be set
+ # to true so that the module will exchange the alias for a CMK ARN. Setting
+ # this to true and using aliases requires
+ # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be
+ # true for multi-account scenarios.
cloudtrail_kms_key_arn_is_alias = false
- # Additional service principals beyond CloudTrail that should have access to the
- # KMS key used to encrypt the logs. This is useful for granting access to the logs
- # for the purposes of constructing metric filters.
+ # Additional service principals beyond CloudTrail that should have access to
+ # the KMS key used to encrypt the logs. This is useful for granting access to
+ # the logs for the purposes of constructing metric filters.
cloudtrail_kms_key_service_principals = []
# All CloudTrail Logs will be encrypted with a KMS Key (a Customer Master Key)
- # that governs access to write API calls older than 7 days and all read API calls.
- # The IAM Users specified in this list will have read-only access to this extended
- # log data.
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. The IAM Users specified in this list will have read-only access to
+ # this extended log data.
cloudtrail_kms_key_user_iam_arns = []
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
cloudtrail_num_days_after_which_archive_log_data = 30
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
cloudtrail_num_days_after_which_delete_log_data = 365
- # After this number of days, logs stored in CloudWatch will be deleted. Possible
- # values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827,
- # 3653, and 0 (default). When set to 0, logs will be retained indefinitely.
+ # After this number of days, logs stored in CloudWatch will be deleted.
+ # Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400,
+ # 545, 731, 1827, 3653, and 0 (default). When set to 0, logs will be retained
+ # indefinitely.
cloudtrail_num_days_to_retain_cloudwatch_logs = 0
- # The ID of the organization. Required only if an organization wide CloudTrail is
- # being setup and `create_organization` is set to false. The organization ID is
- # required to ensure that the entire organization is whitelisted in the CloudTrail
- # bucket write policy.
+ # The ID of the organization. Required only if an organization wide CloudTrail
+ # is being setup and `create_organization` is set to false. The organization
+ # ID is required to ensure that the entire organization is whitelisted in the
+ # CloudTrail bucket write policy.
cloudtrail_organization_id = null
- # The name of the S3 Bucket where CloudTrail logs will be stored. This could be a
- # bucket in this AWS account or the name of a bucket in another AWS account where
- # CloudTrail logs should be sent. If you set is_logs_account on one of the
- # accounts in var.child_accounts, the S3 bucket will be created in that account
- # (this is the recommended approach!).
+ # The name of the S3 Bucket where CloudTrail logs will be stored. This could
+ # be a bucket in this AWS account or the name of a bucket in another AWS
+ # account where CloudTrail logs should be sent. If you set is_logs_account on
+ # one of the accounts in var.child_accounts, the S3 bucket will be created in
+ # that account (this is the recommended approach!).
cloudtrail_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage Cloudtrail data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store Cloudtrail data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README
+ # from the terraform-aws-security/private-s3-bucket module.
cloudtrail_s3_mfa_delete = false
# If true, create an S3 bucket of name var.cloudtrail_s3_bucket_name for
- # CloudTrail logs, either in the logs account—the account in var.child_accounts
- # that has is_logs_account set to true (this is the recommended approach!)—or in
- # this account if none of the child accounts are marked as a logs account. If
- # false, assume var.cloudtrail_s3_bucket_name is an S3 bucket that already exists.
- # We recommend setting this to true and setting is_logs_account to true on one of
- # the accounts in var.child_accounts to use that account as a logs account where
- # you aggregate all your CloudTrail data. In case you want to disable the
- # CloudTrail module and the S3 bucket, you need to set both var.enable_cloudtrail
- # and cloudtrail_should_create_s3_bucket to false.
+ # CloudTrail logs, either in the logs account—the account in
+ # var.child_accounts that has is_logs_account set to true (this is the
+ # recommended approach!)—or in this account if none of the child accounts are
+ # marked as a logs account. If false, assume var.cloudtrail_s3_bucket_name is
+ # an S3 bucket that already exists. We recommend setting this to true and
+ # setting is_logs_account to true on one of the accounts in var.child_accounts
+ # to use that account as a logs account where you aggregate all your
+ # CloudTrail data. In case you want to disable the CloudTrail module and the
+ # S3 bucket, you need to set both var.enable_cloudtrail and
+ # cloudtrail_should_create_s3_bucket to false.
cloudtrail_should_create_s3_bucket = true
# Tags to apply to the CloudTrail resources.
cloudtrail_tags = {}
# Set to true to send the AWS Config data to another account (e.g., a logs
- # account) for aggregation purposes. You must set the ID of that other account via
- # the config_central_account_id variable. Note that if one of the accounts in
- # var.child_accounts has is_logs_account set to true (this is the approach we
- # recommended!), this variable will be assumed to be true, so you don't have to
- # pass any value for it. This redundant variable has to exist because Terraform
- # does not allow computed data in count and for_each parameters and
- # var.config_central_account_id may be computed if its the ID of a newly-created
- # AWS account.
+ # account) for aggregation purposes. You must set the ID of that other account
+ # via the config_central_account_id variable. Note that if one of the accounts
+ # in var.child_accounts has is_logs_account set to true (this is the approach
+ # we recommended!), this variable will be assumed to be true, so you don't
+ # have to pass any value for it. This redundant variable has to exist because
+ # Terraform does not allow computed data in count and for_each parameters and
+ # var.config_central_account_id may be computed if it's the ID of a
+ # newly-created AWS account.
config_aggregate_config_data_in_external_account = false
# If the S3 bucket and SNS topics used for AWS Config live in a different AWS
- # account, set this variable to the ID of that account. If the S3 bucket and SNS
- # topics live in this account, set this variable to an empty string. Note that if
- # one of the accounts in var.child_accounts has is_logs_account set to true (this
- # is the approach we recommended!), that account's ID will be used automatically,
- # and you can leave this variable null.
+ # account, set this variable to the ID of that account. If the S3 bucket and
+ # SNS topics live in this account, set this variable to an empty string. Note
+ # that if one of the accounts in var.child_accounts has is_logs_account set to
+ # true (this is the approach we recommended!), that account's ID will be used
+ # automatically, and you can leave this variable null.
config_central_account_id = ""
- # Set to true to create account-level AWS Config rules directly in this account.
- # Set false to create org-level rules that apply to this account and all child
- # accounts. We recommend setting this to true to use account-level rules because
- # org-level rules create a chicken-and-egg problem with creating new accounts (see
- # this module's README for details).
+ # Set to true to create account-level AWS Config rules directly in this
+ # account. Set false to create org-level rules that apply to this account and
+ # all child accounts. We recommend setting this to true to use account-level
+ # rules because org-level rules create a chicken-and-egg problem with creating
+ # new accounts (see this module's README for details).
config_create_account_rules = true
# Optional KMS key to use for encrypting S3 objects on the AWS Config delivery
- # channel for an externally managed S3 bucket. This must belong to the same region
- # as the destination S3 bucket. If null, AWS Config will default to encrypting the
- # delivered data with AES-256 encryption. Only used if var.should_create_s3_bucket
- # is false - otherwise, var.config_s3_bucket_kms_key_arn is used.
+ # channel for an externally managed S3 bucket. This must belong to the same
+ # region as the destination S3 bucket. If null, AWS Config will default to
+ # encrypting the delivered data with AES-256 encryption. Only used if
+ # var.should_create_s3_bucket is false - otherwise,
+ # var.config_s3_bucket_kms_key_arn is used.
config_delivery_channel_kms_key_arn = null
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
config_force_destroy = false
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
config_num_days_after_which_archive_log_data = 365
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Optional KMS key (in logs account) to use for encrypting S3 objects on the AWS
- # Config bucket, when the S3 bucket is created within this module
+ # Optional KMS key (in logs account) to use for encrypting S3 objects on the
+ # AWS Config bucket, when the S3 bucket is created within this module
# (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
# delivery for an externally managed S3 bucket, refer to the
- # var.config_delivery_channel_kms_key_arn input variable. If null, data in S3 will
- # be encrypted using the default aws/s3 key. If provided, the key policy of the
- # provided key must permit the IAM role used by AWS Config. See
- # https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that the
- # KMS key must reside in the global recorder region (as configured by
+ # var.config_delivery_channel_kms_key_arn input variable. If null, data in S3
+ # will be encrypted using the default aws/s3 key. If provided, the key policy
+ # of the provided key must permit the IAM role used by AWS Config. See
+ # https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that
+ # the KMS key must reside in the global recorder region (as configured by
# var.aws_region).
config_s3_bucket_kms_key_arn = null
# The name of the S3 Bucket where Config items will be stored. This could be a
- # bucket in this AWS account or the name of a bucket in another AWS account where
- # Config items should be sent. If you set is_logs_account to true on one of the
- # accounts in var.child_accounts, the S3 bucket will be created in that account
- # (this is the recommended approach!).
+ # bucket in this AWS account or the name of a bucket in another AWS account
+ # where Config items should be sent. If you set is_logs_account to true on one
+ # of the accounts in var.child_accounts, the S3 bucket will be created in that
+ # account (this is the recommended approach!).
config_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage AWS Config data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store AWS Config data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README from
+ # the terraform-aws-security/private-s3-bucket module.
config_s3_mfa_delete = false
- # If true, create an S3 bucket of name var.config_s3_bucket_name for AWS Config
- # data, either in the logs account—the account in var.child_accounts that has
- # is_logs_account set to true (this is the recommended approach!)—or in this
- # account if none of the child accounts are marked as a logs account. If false,
- # assume var.config_s3_bucket_name is an S3 bucket that already exists. We
- # recommend setting this to true and setting is_logs_account to true on one of the
- # accounts in var.child_accounts to use that account as a logs account where you
- # aggregate all your AWS Config data. In case you want to disable the AWS Config
- # module and the S3 bucket, you need to set both var.enable_config and
- # config_should_create_s3_bucket to false.
+ # If true, create an S3 bucket of name var.config_s3_bucket_name for AWS
+ # Config data, either in the logs account—the account in var.child_accounts
+ # that has is_logs_account set to true (this is the recommended approach!)—or
+ # in this account if none of the child accounts are marked as a logs account.
+ # If false, assume var.config_s3_bucket_name is an S3 bucket that already
+ # exists. We recommend setting this to true and setting is_logs_account to
+ # true on one of the accounts in var.child_accounts to use that account as a
+ # logs account where you aggregate all your AWS Config data. In case you want
+ # to disable the AWS Config module and the S3 bucket, you need to set both
+ # var.enable_config and config_should_create_s3_bucket to false.
config_should_create_s3_bucket = true
# Set to true to create an SNS topic in this account for sending AWS Config
# notifications. Set to false to assume the topic specified in
- # var.config_sns_topic_name already exists in another AWS account (e.g the logs
- # account).
+ # var.config_sns_topic_name already exists in another AWS account (e.g. the
+ # logs account).
config_should_create_sns_topic = false
- # Optional KMS key to use for each region for configuring default encryption for
- # the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of KMS
- # key). If null or the region key is missing, encryption will not be configured
- # for the SNS topic in that region.
+ # Optional KMS key to use for each region for configuring default encryption
+ # for the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of
+ # KMS key). If null or the region key is missing, encryption will not be
+ # configured for the SNS topic in that region.
config_sns_topic_kms_key_region_map = null
- # The name of the SNS Topic in where AWS Config notifications will be sent. Can be
- # in the same account or in another account.
+ # The name of the SNS Topic in where AWS Config notifications will be sent.
+ # Can be in the same account or in another account.
config_sns_topic_name = "ConfigTopic"
- # A map of tags to apply to the S3 Bucket. The key is the tag name and the value
- # is the tag value.
+ # A map of tags to apply to the S3 Bucket. The key is the tag name and the
+ # value is the tag value.
config_tags = {}
# List of AWS account identifiers to exclude from org-level Config rules. Only
# used if var.config_create_account_rules is false (not recommended).
configrules_excluded_accounts = []
- # The maximum frequency with which AWS Config runs evaluations for the ´PERIODIC´
- # rules. See
- # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.h
- # ml#maximum_execution_frequency
+ # The maximum frequency with which AWS Config runs evaluations for the
+ # ´PERIODIC´ rules. See
+ # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.html#maximum_execution_frequency
configrules_maximum_execution_frequency = "TwentyFour_Hours"
# Set to true to create/configure AWS Organizations for the first time in this
- # account. If you already configured AWS Organizations in your account, set this
- # to false; alternatively, you could set it to true and run 'terraform import' to
- # import you existing Organization.
+ # account. If you already configured AWS Organizations in your account, set
+ # this to false; alternatively, you could set it to true and run 'terraform
+ # import' to import your existing Organization.
create_organization = true
- # The name of the IAM group that will grant access to all external AWS accounts in
- # var.iam_groups_for_cross_account_access.
+ # The name of the IAM group that will grant access to all external AWS
+ # accounts in var.iam_groups_for_cross_account_access.
cross_account_access_all_group_name = "_all-accounts"
# A list of AWS services for which the developers from the accounts in
- # var.allow_dev_access_from_other_account_arns will receive full permissions. See
- # https://goo.gl/ZyoHlz to find the IAM Service name. For example, to grant
- # developers access only to EC2 and Amazon Machine Learning, use the value
- # ["ec2","machinelearning"]. Do NOT add iam to the list of services, or that will
- # grant Developers de facto admin access.
+ # var.allow_dev_access_from_other_account_arns will receive full permissions.
+ # See https://goo.gl/ZyoHlz to find the IAM Service name. For example, to
+ # grant developers access only to EC2 and Amazon Machine Learning, use the
+ # value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
+ # that will grant Developers de facto admin access.
dev_permitted_services = []
- # If set to true (default), all new EBS volumes will have encryption enabled by
- # default
+ # If set to true (default), all new EBS volumes will have encryption enabled
+ # by default
ebs_enable_encryption = true
- # Optional map of region names to KMS keys to use for EBS volume encryption when
- # var.ebs_use_existing_kms_keys is enabled.
+ # Optional map of region names to KMS keys to use for EBS volume encryption
+ # when var.ebs_use_existing_kms_keys is enabled.
ebs_kms_key_arns = {}
# If set to true, the KMS Customer Managed Keys (CMK) specified in
- # var.ebs_kms_key_arns will be set as the default for EBS encryption. When false
- # (default), the AWS-managed aws/ebs key will be used.
+ # var.ebs_kms_key_arns will be set as the default for EBS encryption. When
+ # false (default), the AWS-managed aws/ebs key will be used.
ebs_use_existing_kms_keys = false
- # Set to true to enable CloudTrail in the root account. Set to false to disable
- # CloudTrail (note: all other CloudTrail variables will be ignored). In case you
- # want to disable the CloudTrail module and the S3 bucket, you need to set both
- # var.enable_cloudtrail and cloudtrail_should_create_s3_bucket to false.
+ # Set to true to enable CloudTrail in the root account. Set to false to
+ # disable CloudTrail (note: all other CloudTrail variables will be ignored).
+ # In case you want to disable the CloudTrail module and the S3 bucket, you
+ # need to set both var.enable_cloudtrail and
+ # cloudtrail_should_create_s3_bucket to false.
enable_cloudtrail = true
- # Enables S3 server access logging which sends detailed records for the requests
- # that are made to the bucket. Defaults to false.
+ # Enables S3 server access logging which sends detailed records for the
+ # requests that are made to the bucket. Defaults to false.
enable_cloudtrail_s3_server_access_logging = false
- # Set to true to enable AWS Config in the root account. Set to false to disable
- # AWS Config (note: all other AWS config variables will be ignored). In case you
- # want to disable the CloudTrail module and the S3 bucket, you need to set both
- # var.enable_cloudtrail and cloudtrail_should_create_s3_bucket to false.
+ # Set to true to enable AWS Config in the root account. Set to false to
+ # disable AWS Config (note: all other AWS config variables will be ignored).
+ # In case you want to disable the AWS Config module and the S3 bucket, you
+ # need to set both var.enable_config and config_should_create_s3_bucket to
+ # false.
enable_config = true
# Checks whether the EBS volumes that are in an attached state are encrypted.
@@ -576,8 +584,8 @@ Refer to the AWS docs on data event selection for more details on the
# When true, create an Open ID Connect Provider that GitHub actions can use to
# assume IAM roles in the account. Refer to
- # https://docs.github.com/en/actions/deployment/security-hardening-your-deployment
- # /configuring-openid-connect-in-amazon-web-services for more information.
+ # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services
+ # for more information.
enable_github_actions_access = false
# A feature flag to enable or disable this module.
@@ -593,15 +601,15 @@ Refer to the AWS docs on data event selection for more details on the
# requirements.
enable_iam_password_policy = true
- # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual Private
- # Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
+ # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual
+ # Private Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
enable_insecure_sg_rules = true
# Checks whether storage encryption is enabled for your RDS DB instances.
enable_rds_storage_encrypted = true
- # Checks whether users of your AWS account require a multi-factor authentication
- # (MFA) device to sign in with root credentials.
+ # Checks whether users of your AWS account require a multi-factor
+ # authentication (MFA) device to sign in with root credentials.
enable_root_account_mfa = true
# Checks that your Amazon S3 buckets do not allow public read access.
@@ -615,15 +623,16 @@ Refer to the AWS docs on data event selection for more details on the
encrypted_volumes_kms_id = null
# When destroying this user, destroy even if it has non-Terraform-managed IAM
- # access keys, login profile, or MFA devices. Without force_destroy a user with
- # non-Terraform-managed access keys and login profile will fail to be destroyed.
+ # access keys, login profile, or MFA devices. Without force_destroy a user
+ # with non-Terraform-managed access keys and login profile will fail to be
+ # destroyed.
force_destroy_users = false
- # When set, use the statically provided hardcoded list of thumbprints rather than
- # looking it up dynamically. This is useful if you want to trade reliability of
- # the OpenID Connect Provider across certificate renewals with a static list that
- # is obtained using a trustworthy mechanism, to mitigate potential damage from a
- # domain hijacking attack on GitHub domains.
+ # When set, use the statically provided hardcoded list of thumbprints rather
+ # than looking it up dynamically. This is useful if you want to trade
+ # reliability of the OpenID Connect Provider across certificate renewals with
+ # a static list that is obtained using a trustworthy mechanism, to mitigate
+ # potential damage from a domain hijacking attack on GitHub domains.
github_actions_openid_connect_provider_thumbprint_list = null
# Name of the Cloudwatch event rules.
@@ -632,9 +641,9 @@ Refer to the AWS docs on data event selection for more details on the
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
- # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must be
- # configured in Terraform to enable drift detection. Valid values for standalone
- # and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
+ # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must
+ # be configured in Terraform to enable drift detection. Valid values for
+ # standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
# Specifies a name for the created SNS topics where findings are published.
@@ -647,42 +656,42 @@ Refer to the AWS docs on data event selection for more details on the
# The name of the IAM Access Analyzer module
iam_access_analyzer_name = "baseline_root-iam_access_analyzer"
- # If set to ORGANIZATION, the analyzer will be scanning the current organization
- # and any policies that refer to linked resources such as S3, IAM, Lambda and SQS
- # policies.
+ # If set to ORGANIZATION, the analyzer will be scanning the current
+ # organization and any policies that refer to linked resources such as S3,
+ # IAM, Lambda and SQS policies.
iam_access_analyzer_type = "ORGANIZATION"
# A list of AWS services for which the developers IAM Group will receive full
# permissions. See https://goo.gl/ZyoHlz to find the IAM Service name. For
- # example, to grant developers access only to EC2 and Amazon Machine Learning, use
- # the value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
- # that will grant Developers de facto admin access. If you need to grant iam
- # privileges, just grant the user Full Access.
+ # example, to grant developers access only to EC2 and Amazon Machine Learning,
+ # use the value ["ec2","machinelearning"]. Do NOT add iam to the list of
+ # services, or that will grant Developers de facto admin access. If you need
+ # to grant iam privileges, just grant the user Full Access.
iam_group_developers_permitted_services = []
- # The list of names to be used for the IAM Group that enables its members to SSH
- # as a sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # The list of names to be used for the IAM Group that enables its members to
+ # SSH as a sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_sudo_users = []
# The name to be used for the IAM Group that enables its members to SSH as a
- # non-sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # non-sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_users = []
- # This variable is used to create groups that allow IAM users to assume roles in
- # your other AWS accounts. It should be a list of objects, where each object has
- # the fields 'group_name', which will be used as the name of the IAM group, and
- # 'iam_role_arns', which is a list of ARNs of IAM Roles that you can assume when
- # part of that group. For each entry in the list of objects, we will create an IAM
- # group that allows users to assume the given IAM role(s) in the other AWS
- # account. This allows you to define all your IAM users in one account (e.g. the
- # users account) and to grant them access to certain IAM roles in other accounts
- # (e.g. the stage, prod, audit accounts).
+ # This variable is used to create groups that allow IAM users to assume roles
+ # in your other AWS accounts. It should be a list of objects, where each
+ # object has the fields 'group_name', which will be used as the name of the
+ # IAM group, and 'iam_role_arns', which is a list of ARNs of IAM Roles that
+ # you can assume when part of that group. For each entry in the list of
+ # objects, we will create an IAM group that allows users to assume the given
+ # IAM role(s) in the other AWS account. This allows you to define all your IAM
+ # users in one account (e.g. the users account) and to grant them access to
+ # certain IAM roles in other accounts (e.g. the stage, prod, audit accounts).
iam_groups_for_cross_account_access = []
# Allow users to change their own password.
@@ -715,32 +724,32 @@ Refer to the AWS docs on data event selection for more details on the
# The tags to apply to all the IAM role resources.
iam_role_tags = {}
- # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '443,1020-1025'.
+ # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '443,1020-1025'.
insecure_sg_rules_authorized_tcp_ports = "443"
- # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '500,1020-1025'.
+ # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '500,1020-1025'.
insecure_sg_rules_authorized_udp_ports = null
- # Specifies whether CloudTrail will log only API calls in the current region or in
- # all regions. (true or false)
+ # Specifies whether CloudTrail will log only API calls in the current region
+ # or in all regions. (true or false)
is_multi_region_trail = true
# List of AWS service principal names for which you want to enable integration
- # with your organization. Must have `organizations_feature_set` set to ALL. See
- # https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_servic
- # s.html
+ # with your organization. Must have `organizations_feature_set` set to ALL.
+ # See
+ # https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html
organizations_aws_service_access_principals = ["cloudtrail.amazonaws.com","config-multiaccountsetup.amazonaws.com","config.amazonaws.com","access-analyzer.amazonaws.com"]
# If set to ALLOW, the new account enables IAM users to access account billing
- # information if they have the required permissions. If set to DENY, then only the
- # root user of the new account can access account billing information.
+ # information if they have the required permissions. If set to DENY, then only
+ # the root user of the new account can access account billing information.
organizations_default_iam_user_access_to_billing = "ALLOW"
- # The name of an IAM role that Organizations automatically preconfigures in the
- # new member account. This role trusts the master account, allowing users in the
- # master account to assume the role, as permitted by the master account
+ # The name of an IAM role that Organizations automatically preconfigures in
+ # the new member account. This role trusts the master account, allowing users
+ # in the master account to assume the role, as permitted by the master account
# administrator.
organizations_default_role_name = "OrganizationAccountAccessRole"
@@ -748,8 +757,7 @@ Refer to the AWS docs on data event selection for more details on the
organizations_default_tags = {}
# List of Organizations policy types to enable in the Organization Root. See
- # https://docs.aws.amazon.com/organizations/latest/APIReference/API_EnablePolicyTy
- # e.html
+ # https://docs.aws.amazon.com/organizations/latest/APIReference/API_EnablePolicyType.html
organizations_enabled_policy_types = ["SERVICE_CONTROL_POLICY"]
# Specify `ALL` or `CONSOLIDATED_BILLING`.
@@ -763,67 +771,68 @@ Refer to the AWS docs on data event selection for more details on the
# storage encryption config rule.
rds_storage_encrypted_kms_id = null
- # Should we create the IAM Group for auto-deploy? Allows automated deployment by
- # granting the permissions specified in var.auto_deploy_permissions. (true or
- # false)
+ # Should we create the IAM Group for auto-deploy? Allows automated deployment
+ # by granting the permissions specified in var.auto_deploy_permissions. (true
+ # or false)
should_create_iam_group_auto_deploy = false
- # Should we create the IAM Group for billing? Allows read-write access to billing
- # features only. (true or false)
+ # Should we create the IAM Group for billing? Allows read-write access to
+ # billing features only. (true or false)
should_create_iam_group_billing = true
- # Should we create the IAM Group for developers? The permissions of that group are
- # specified via var.iam_group_developers_permitted_services. (true or false)
+ # Should we create the IAM Group for developers? The permissions of that group
+ # are specified via var.iam_group_developers_permitted_services. (true or
+ # false)
should_create_iam_group_developers = false
- # Should we create the IAM Group for full access? Allows full access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for full access? Allows full access to all
+ # AWS resources. (true or false)
should_create_iam_group_full_access = true
# Should we create the IAM Group for logs? Allows read access to logs in
# CloudTrail, AWS Config, and CloudWatch. If var.cloudtrail_kms_key_arn is
- # specified, will also be given permissions to decrypt with the KMS CMK that is
- # used to encrypt CloudTrail logs. (true or false)
+ # specified, will also be given permissions to decrypt with the KMS CMK that
+ # is used to encrypt CloudTrail logs. (true or false)
should_create_iam_group_logs = false
- # Should we create the IAM Group for read-only? Allows read-only access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for read-only? Allows read-only access to all
+ # AWS resources. (true or false)
should_create_iam_group_read_only = false
- # Should we create the IAM Group for support? Allows access to AWS support. (true
- # or false)
+ # Should we create the IAM Group for support? Allows access to AWS support.
+ # (true or false)
should_create_iam_group_support = true
- # Should we create the IAM Group for use-existing-iam-roles? Allow launching AWS
- # resources with existing IAM Roles, but no ability to create new IAM Roles. (true
- # or false)
+ # Should we create the IAM Group for use-existing-iam-roles? Allow launching
+ # AWS resources with existing IAM Roles, but no ability to create new IAM
+ # Roles. (true or false)
should_create_iam_group_use_existing_iam_roles = false
- # Should we create the IAM Group for user self-management? Allows users to manage
- # their own IAM user accounts, but not other IAM users. (true or false)
+ # Should we create the IAM Group for user self-management? Allows users to
+ # manage their own IAM user accounts, but not other IAM users. (true or false)
should_create_iam_group_user_self_mgmt = false
- # Should we require that all IAM Users use Multi-Factor Authentication for both
- # AWS API calls and the AWS Web Console? (true or false)
+ # Should we require that all IAM Users use Multi-Factor Authentication for
+ # both AWS API calls and the AWS Web Console? (true or false)
should_require_mfa = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# A map of users to create. The keys are the user names and the values are an
# object with the optional keys 'groups' (a list of IAM groups to add the user
- # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a base-64
- # encoded PGP public key, or a keybase username in the form keybase:username, used
- # to encrypt the user's credentials; required if create_login_profile or
- # create_access_keys is true), 'create_login_profile' (if set to true, create a
- # password to login to the AWS Web Console), 'create_access_keys' (if set to true,
- # create access keys for the user), 'path' (the path), and 'permissions_boundary'
- # (the ARN of the policy that is used to set the permissions boundary for the
- # user).
+ # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a
+ # base-64 encoded PGP public key, or a keybase username in the form
+ # keybase:username, used to encrypt the user's credentials; required if
+ # create_login_profile or create_access_keys is true), 'create_login_profile'
+ # (if set to true, create a password to login to the AWS Web Console),
+ # 'create_access_keys' (if set to true, create access keys for the user),
+ # 'path' (the path), and 'permissions_boundary' (the ARN of the policy that is
+ # used to set the permissions boundary for the user).
users = {}
}
@@ -841,7 +850,7 @@ Refer to the AWS docs on data event selection for more details on the
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-root?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-root?ref=v0.104.12"
}
inputs = {
@@ -858,82 +867,84 @@ inputs = {
# GuardDuty.
aws_region =
- # Map of child accounts to create. The map key is the name of the account and the
- # value is an object containing account configuration variables. See the comments
- # below for what keys and values this object should contain.
+ # Map of child accounts to create. The map key is the name of the account and
+ # the value is an object containing account configuration variables. See the
+ # comments below for what keys and values this object should contain.
child_accounts =
- # Creates resources in the specified regions. The best practice is to enable AWS
- # Config in all enabled regions in your AWS account. This variable must NOT be set
- # to null or empty. Otherwise, we won't know which regions to use and authenticate
- # to, and may use some not enabled in your AWS account (e.g., GovCloud, China,
- # etc). To get the list of regions enabled in your AWS account, you can use the
- # AWS CLI: aws ec2 describe-regions.
+ # Creates resources in the specified regions. The best practice is to enable
+ # AWS Config in all enabled regions in your AWS account. This variable must
+ # NOT be set to null or empty. Otherwise, we won't know which regions to use
+ # and authenticate to, and may use some not enabled in your AWS account (e.g.,
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions.
config_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable EBS
- # Encryption in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # EBS Encryption in all enabled regions in your AWS account. This variable
+ # must NOT be set to null or empty. Otherwise, we won't know which regions to
+ # use and authenticate to, and may use some not enabled in your AWS account
+ # (e.g., GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
ebs_opt_in_regions =
# Creates resources in the specified regions. The best practice is to enable
- # GuardDuty in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
+ # GuardDuty in all enabled regions in your AWS account. This variable must NOT
+ # be set to null or empty. Otherwise, we won't know which regions to use and
# authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
guardduty_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable IAM
- # Access Analyzer in all enabled regions in your AWS account. This variable must
- # NOT be set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # IAM Access Analyzer in all enabled regions in your AWS account. This
+ # variable must NOT be set to null or empty. Otherwise, we won't know which
+ # regions to use and authenticate to, and may use some not enabled in your AWS
+ # account (e.g., GovCloud, China, etc). To get the list of regions enabled in
+ # your AWS account, you can use the AWS CLI: aws ec2 describe-regions. The
+ # value provided for global_recorder_region must be in this list.
iam_access_analyzer_opt_in_regions =
- # The name used to prefix AWS Config and Cloudtrail resources, including the S3
- # bucket names and SNS topics used for each.
+ # The name used to prefix AWS Config and Cloudtrail resources, including the
+ # S3 bucket names and SNS topics used for each.
name_prefix =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Map of additional managed rules to add. The key is the name of the rule (e.g.
- # ´acm-certificate-expiration-check´) and the value is an object specifying the
- # rule details
+ # Map of additional managed rules to add. The key is the name of the rule
+ # (e.g. 'acm-certificate-expiration-check') and the value is an object
+ # specifying the rule details
additional_config_rules = {}
- # Map of github repositories to the list of branches that are allowed to assume
- # the IAM role. The repository should be encoded as org/repo-name (e.g.,
- # gruntwork-io/terrraform-aws-ci). Allows GitHub Actions to assume the auto deploy
- # IAM role using an OpenID Connect Provider for the given repositories. Refer to
- # the docs for github-actions-iam-role for more information. Note that this is
- # mutually exclusive with var.allow_auto_deploy_from_other_account_arns. Only used
- # if var.enable_github_actions_access is true.
+ # Map of github repositories to the list of branches that are allowed to
+ # assume the IAM role. The repository should be encoded as org/repo-name
+ # (e.g., gruntwork-io/terraform-aws-ci). Allows GitHub Actions to assume the
+ # auto deploy IAM role using an OpenID Connect Provider for the given
+ # repositories. Refer to the docs for github-actions-iam-role for more
+ # information. Note that this is mutually exclusive with
+ # var.allow_auto_deploy_from_other_account_arns. Only used if
+ # var.enable_github_actions_access is true.
allow_auto_deploy_from_github_actions_for_sources = {}
- # A list of IAM ARNs from other AWS accounts that will be allowed to assume the
- # auto deploy IAM role that has the permissions in var.auto_deploy_permissions.
+ # A list of IAM ARNs from other AWS accounts that will be allowed to assume
+ # the auto deploy IAM role that has the permissions in
+ # var.auto_deploy_permissions.
allow_auto_deploy_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_auto_deploy_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the billing info for this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the billing info for this account.
allow_billing_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_billing_access_iam_role_permissions_boundary = null
# If true, an IAM Policy that grants access to CloudTrail will be honored. If
@@ -941,374 +952,380 @@ inputs = {
# CloudTrail and any IAM Policy grants will be ignored. (true or false)
allow_cloudtrail_access_with_iam = true
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the services in this account specified in
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the services in this account specified in
# var.dev_permitted_services.
allow_dev_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_dev_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to this account.
allow_full_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_full_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed read access to
- # the logs in CloudTrail, AWS Config, and CloudWatch for this account. If
+ # A list of IAM ARNs from other AWS accounts that will be allowed read access
+ # to the logs in CloudTrail, AWS Config, and CloudWatch for this account. If
# var.cloudtrail_kms_key_arn is specified, will also be given permissions to
# decrypt with the KMS CMK that is used to encrypt CloudTrail logs.
allow_logs_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed read-only access
- # to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read-only
+ # access to this account.
allow_read_only_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_read_only_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed read access to
- # IAM groups and publish SSH keys. This is used for ssh-grunt.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read access
+ # to IAM groups and publish SSH keys. This is used for ssh-grunt.
allow_ssh_grunt_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed access to AWS
- # support for this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed access to
+ # AWS support for this account.
allow_support_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_support_access_iam_role_permissions_boundary = null
- # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group for
- # doing automated deployments. NOTE: If var.should_create_iam_group_auto_deploy is
- # true, the list must have at least one element (e.g. '*').
+ # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group
+ # for doing automated deployments. NOTE: If
+ # var.should_create_iam_group_auto_deploy is true, the list must have at least
+ # one element (e.g. '*').
auto_deploy_permissions = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
aws_config_iam_role_permissions_boundary = null
- # Additional IAM policies to apply to cloudtrail S3 bucket. You can use this to
- # grant read/write access beyond what is provided to Cloudtrail. This should be a
- # map, where each key is a unique statement ID (SID), and each value is an object
- # that contains the parameters defined in the comment below.
+ # Additional IAM policies to apply to cloudtrail S3 bucket. You can use this
+ # to grant read/write access beyond what is provided to Cloudtrail. This
+ # should be a map, where each key is a unique statement ID (SID), and each
+ # value is an object that contains the parameters defined in the comment
+ # below.
cloudtrail_additional_bucket_policy_statements = null
- # Map of advanced event selector name to list of field selectors to apply for that
- # event selector. Advanced event selectors allow for more fine grained data
- # logging of events.
+ # Map of advanced event selector name to list of field selectors to apply for
+ # that event selector. Advanced event selectors allow for more fine grained
+ # data logging of events.
Note that you can not configure basic data logging
# (var.cloudtrail_data_logging_enabled) if advanced event logging is
# enabled.
-Refer to the AWS docs on data event selection for more details on the
- # difference between basic data logging and advanced data logging.
-
+Refer to the AWS docs on data event selection for more details on
+ # the difference between basic data logging and advanced data logging.
cloudtrail_advanced_event_selectors = {}
# Whether or not to allow kms:DescribeKey to external AWS accounts with write
- # access to the CloudTrail bucket. This is useful during deployment so that you
- # don't have to pass around the KMS key ARN.
+ # access to the CloudTrail bucket. This is useful during deployment so that
+ # you don't have to pass around the KMS key ARN.
cloudtrail_allow_kms_describe_key_to_external_aws_accounts = false
- # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs to.
- # This log group exists in the current account. Set this value to `null` to avoid
- # publishing the trail logs to the logs group. The recommended configuration for
- # CloudTrail is (a) for each child account to aggregate its logs in an S3 bucket
- # in a single central account, such as a logs account and (b) to also store 14
- # days work of logs in CloudWatch in the child account itself for local debugging.
+ # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs
+ # to. This log group exists in the current account. Set this value to `null`
+ # to avoid publishing the trail logs to the logs group. The recommended
+ # configuration for CloudTrail is (a) for each child account to aggregate its
+ # logs in an S3 bucket in a single central account, such as a logs account and
+ # (b) to also store 14 days' worth of logs in CloudWatch in the child account
+ # itself for local debugging.
cloudtrail_cloudwatch_logs_group_name = "cloudtrail-logs"
# If true, logging of data events will be enabled.
cloudtrail_data_logging_enabled = false
- # Specify if you want your event selector to include management events for your
- # trail.
+ # Specify if you want your event selector to include management events for
+ # your trail.
cloudtrail_data_logging_include_management_events = true
- # Specify if you want your trail to log read-only events, write-only events, or
- # all. Possible values are: ReadOnly, WriteOnly, All.
+ # Specify if you want your trail to log read-only events, write-only events,
+ # or all. Possible values are: ReadOnly, WriteOnly, All.
cloudtrail_data_logging_read_write_type = "All"
- # Data resources for which to log data events. This should be a map, where each
- # key is a data resource type, and each value is a list of data resource values.
- # Possible values for data resource types are: AWS::S3::Object,
- # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource' block
- # within the 'event_selector' block of the 'aws_cloudtrail' resource for context:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # trail#data_resource.
+ # Data resources for which to log data events. This should be a map, where
+ # each key is a data resource type, and each value is a list of data resource
+ # values. Possible values for data resource types are: AWS::S3::Object,
+ # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource'
+ # block within the 'event_selector' block of the 'aws_cloudtrail' resource for
+ # context:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#data_resource.
cloudtrail_data_logging_resources = {}
- # Whether or not to enable automatic annual rotation of the KMS key. Defaults to
- # true.
+ # Whether or not to enable automatic annual rotation of the KMS key. Defaults
+ # to true.
cloudtrail_enable_key_rotation = true
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
cloudtrail_force_destroy = false
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
cloudtrail_iam_role_permissions_boundary = null
- # Specifies whether the trail is an AWS Organizations trail. Organization trails
- # log events for the root account and all member accounts. Can only be created in
- # the organization root account. (true or false)
+ # Specifies whether the trail is an AWS Organizations trail. Organization
+ # trails log events for the root account and all member accounts. Can only be
+ # created in the organization root account. (true or false)
cloudtrail_is_organization_trail = false
# All CloudTrail Logs will be encrypted with a KMS Key (a Customer Master Key)
- # that governs access to write API calls older than 7 days and all read API calls.
- # The IAM Users specified in this list will have rights to change who can access
- # this extended log data. Note that if you specify a logs account (by setting
- # is_logs_account = true on one of the accounts in var.child_accounts), the KMS
- # CMK will be created in that account, and the root of that account will
- # automatically be made an admin of the CMK.
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. The IAM Users specified in this list will have rights to change who
+ # can access this extended log data. Note that if you specify a logs account
+ # (by setting is_logs_account = true on one of the accounts in
+ # var.child_accounts), the KMS CMK will be created in that account, and the
+ # root of that account will automatically be made an admin of the CMK.
cloudtrail_kms_key_administrator_iam_arns = []
- # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key) that
- # governs access to write API calls older than 7 days and all read API calls. If
- # that CMK already exists, set this to the ARN of that CMK. Otherwise, set this to
- # null, and a new CMK will be created. If you set is_logs_account to true on one
- # of the accounts in var.child_accounts, the KMS CMK will be created in that
- # account (this is the recommended approach!).
+ # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. If that CMK already exists, set this to the ARN of that CMK.
+ # Otherwise, set this to null, and a new CMK will be created. If you set
+ # is_logs_account to true on one of the accounts in var.child_accounts, the
+ # KMS CMK will be created in that account (this is the recommended approach!).
cloudtrail_kms_key_arn = null
- # If the kms_key_arn provided is an alias or alias ARN, then this must be set to
- # true so that the module will exchange the alias for a CMK ARN. Setting this to
- # true and using aliases requires
- # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be true
- # for multi-account scenarios.
+ # If the kms_key_arn provided is an alias or alias ARN, then this must be set
+ # to true so that the module will exchange the alias for a CMK ARN. Setting
+ # this to true and using aliases requires
+ # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be
+ # true for multi-account scenarios.
cloudtrail_kms_key_arn_is_alias = false
- # Additional service principals beyond CloudTrail that should have access to the
- # KMS key used to encrypt the logs. This is useful for granting access to the logs
- # for the purposes of constructing metric filters.
+ # Additional service principals beyond CloudTrail that should have access to
+ # the KMS key used to encrypt the logs. This is useful for granting access to
+ # the logs for the purposes of constructing metric filters.
cloudtrail_kms_key_service_principals = []
# All CloudTrail Logs will be encrypted with a KMS Key (a Customer Master Key)
- # that governs access to write API calls older than 7 days and all read API calls.
- # The IAM Users specified in this list will have read-only access to this extended
- # log data.
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. The IAM Users specified in this list will have read-only access to
+ # this extended log data.
cloudtrail_kms_key_user_iam_arns = []
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
cloudtrail_num_days_after_which_archive_log_data = 30
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
cloudtrail_num_days_after_which_delete_log_data = 365
- # After this number of days, logs stored in CloudWatch will be deleted. Possible
- # values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827,
- # 3653, and 0 (default). When set to 0, logs will be retained indefinitely.
+ # After this number of days, logs stored in CloudWatch will be deleted.
+ # Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400,
+ # 545, 731, 1827, 3653, and 0 (default). When set to 0, logs will be retained
+ # indefinitely.
cloudtrail_num_days_to_retain_cloudwatch_logs = 0
- # The ID of the organization. Required only if an organization wide CloudTrail is
- # being setup and `create_organization` is set to false. The organization ID is
- # required to ensure that the entire organization is whitelisted in the CloudTrail
- # bucket write policy.
+ # The ID of the organization. Required only if an organization wide CloudTrail
+ # is being setup and `create_organization` is set to false. The organization
+ # ID is required to ensure that the entire organization is whitelisted in the
+ # CloudTrail bucket write policy.
cloudtrail_organization_id = null
- # The name of the S3 Bucket where CloudTrail logs will be stored. This could be a
- # bucket in this AWS account or the name of a bucket in another AWS account where
- # CloudTrail logs should be sent. If you set is_logs_account on one of the
- # accounts in var.child_accounts, the S3 bucket will be created in that account
- # (this is the recommended approach!).
+ # The name of the S3 Bucket where CloudTrail logs will be stored. This could
+ # be a bucket in this AWS account or the name of a bucket in another AWS
+ # account where CloudTrail logs should be sent. If you set is_logs_account on
+ # one of the accounts in var.child_accounts, the S3 bucket will be created in
+ # that account (this is the recommended approach!).
cloudtrail_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage Cloudtrail data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store Cloudtrail data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README
+ # from the terraform-aws-security/private-s3-bucket module.
cloudtrail_s3_mfa_delete = false
# If true, create an S3 bucket of name var.cloudtrail_s3_bucket_name for
- # CloudTrail logs, either in the logs account—the account in var.child_accounts
- # that has is_logs_account set to true (this is the recommended approach!)—or in
- # this account if none of the child accounts are marked as a logs account. If
- # false, assume var.cloudtrail_s3_bucket_name is an S3 bucket that already exists.
- # We recommend setting this to true and setting is_logs_account to true on one of
- # the accounts in var.child_accounts to use that account as a logs account where
- # you aggregate all your CloudTrail data. In case you want to disable the
- # CloudTrail module and the S3 bucket, you need to set both var.enable_cloudtrail
- # and cloudtrail_should_create_s3_bucket to false.
+ # CloudTrail logs, either in the logs account—the account in
+ # var.child_accounts that has is_logs_account set to true (this is the
+ # recommended approach!)—or in this account if none of the child accounts are
+ # marked as a logs account. If false, assume var.cloudtrail_s3_bucket_name is
+ # an S3 bucket that already exists. We recommend setting this to true and
+ # setting is_logs_account to true on one of the accounts in var.child_accounts
+ # to use that account as a logs account where you aggregate all your
+ # CloudTrail data. In case you want to disable the CloudTrail module and the
+ # S3 bucket, you need to set both var.enable_cloudtrail and
+ # cloudtrail_should_create_s3_bucket to false.
cloudtrail_should_create_s3_bucket = true
# Tags to apply to the CloudTrail resources.
cloudtrail_tags = {}
# Set to true to send the AWS Config data to another account (e.g., a logs
- # account) for aggregation purposes. You must set the ID of that other account via
- # the config_central_account_id variable. Note that if one of the accounts in
- # var.child_accounts has is_logs_account set to true (this is the approach we
- # recommended!), this variable will be assumed to be true, so you don't have to
- # pass any value for it. This redundant variable has to exist because Terraform
- # does not allow computed data in count and for_each parameters and
- # var.config_central_account_id may be computed if its the ID of a newly-created
- # AWS account.
+ # account) for aggregation purposes. You must set the ID of that other account
+ # via the config_central_account_id variable. Note that if one of the accounts
+ # in var.child_accounts has is_logs_account set to true (this is the approach
+ # we recommended!), this variable will be assumed to be true, so you don't
+ # have to pass any value for it. This redundant variable has to exist because
+ # Terraform does not allow computed data in count and for_each parameters and
+ # var.config_central_account_id may be computed if it's the ID of a
+ # newly-created AWS account.
config_aggregate_config_data_in_external_account = false
# If the S3 bucket and SNS topics used for AWS Config live in a different AWS
- # account, set this variable to the ID of that account. If the S3 bucket and SNS
- # topics live in this account, set this variable to an empty string. Note that if
- # one of the accounts in var.child_accounts has is_logs_account set to true (this
- # is the approach we recommended!), that account's ID will be used automatically,
- # and you can leave this variable null.
+ # account, set this variable to the ID of that account. If the S3 bucket and
+ # SNS topics live in this account, set this variable to an empty string. Note
+ # that if one of the accounts in var.child_accounts has is_logs_account set to
+ # true (this is the approach we recommended!), that account's ID will be used
+ # automatically, and you can leave this variable null.
config_central_account_id = ""
- # Set to true to create account-level AWS Config rules directly in this account.
- # Set false to create org-level rules that apply to this account and all child
- # accounts. We recommend setting this to true to use account-level rules because
- # org-level rules create a chicken-and-egg problem with creating new accounts (see
- # this module's README for details).
+ # Set to true to create account-level AWS Config rules directly in this
+ # account. Set false to create org-level rules that apply to this account and
+ # all child accounts. We recommend setting this to true to use account-level
+ # rules because org-level rules create a chicken-and-egg problem with creating
+ # new accounts (see this module's README for details).
config_create_account_rules = true
# Optional KMS key to use for encrypting S3 objects on the AWS Config delivery
- # channel for an externally managed S3 bucket. This must belong to the same region
- # as the destination S3 bucket. If null, AWS Config will default to encrypting the
- # delivered data with AES-256 encryption. Only used if var.should_create_s3_bucket
- # is false - otherwise, var.config_s3_bucket_kms_key_arn is used.
+ # channel for an externally managed S3 bucket. This must belong to the same
+ # region as the destination S3 bucket. If null, AWS Config will default to
+ # encrypting the delivered data with AES-256 encryption. Only used if
+ # var.should_create_s3_bucket is false - otherwise,
+ # var.config_s3_bucket_kms_key_arn is used.
config_delivery_channel_kms_key_arn = null
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
config_force_destroy = false
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
config_num_days_after_which_archive_log_data = 365
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Optional KMS key (in logs account) to use for encrypting S3 objects on the AWS
- # Config bucket, when the S3 bucket is created within this module
+ # Optional KMS key (in logs account) to use for encrypting S3 objects on the
+ # AWS Config bucket, when the S3 bucket is created within this module
# (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
# delivery for an externally managed S3 bucket, refer to the
- # var.config_delivery_channel_kms_key_arn input variable. If null, data in S3 will
- # be encrypted using the default aws/s3 key. If provided, the key policy of the
- # provided key must permit the IAM role used by AWS Config. See
- # https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that the
- # KMS key must reside in the global recorder region (as configured by
+ # var.config_delivery_channel_kms_key_arn input variable. If null, data in S3
+ # will be encrypted using the default aws/s3 key. If provided, the key policy
+ # of the provided key must permit the IAM role used by AWS Config. See
+ # https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that
+ # the KMS key must reside in the global recorder region (as configured by
# var.aws_region).
config_s3_bucket_kms_key_arn = null
# The name of the S3 Bucket where Config items will be stored. This could be a
- # bucket in this AWS account or the name of a bucket in another AWS account where
- # Config items should be sent. If you set is_logs_account to true on one of the
- # accounts in var.child_accounts, the S3 bucket will be created in that account
- # (this is the recommended approach!).
+ # bucket in this AWS account or the name of a bucket in another AWS account
+ # where Config items should be sent. If you set is_logs_account to true on one
+ # of the accounts in var.child_accounts, the S3 bucket will be created in that
+ # account (this is the recommended approach!).
config_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage AWS Config data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store AWS Config data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README from
+ # the terraform-aws-security/private-s3-bucket module.
config_s3_mfa_delete = false
- # If true, create an S3 bucket of name var.config_s3_bucket_name for AWS Config
- # data, either in the logs account—the account in var.child_accounts that has
- # is_logs_account set to true (this is the recommended approach!)—or in this
- # account if none of the child accounts are marked as a logs account. If false,
- # assume var.config_s3_bucket_name is an S3 bucket that already exists. We
- # recommend setting this to true and setting is_logs_account to true on one of the
- # accounts in var.child_accounts to use that account as a logs account where you
- # aggregate all your AWS Config data. In case you want to disable the AWS Config
- # module and the S3 bucket, you need to set both var.enable_config and
- # config_should_create_s3_bucket to false.
+ # If true, create an S3 bucket of name var.config_s3_bucket_name for AWS
+ # Config data, either in the logs account—the account in var.child_accounts
+ # that has is_logs_account set to true (this is the recommended approach!)—or
+ # in this account if none of the child accounts are marked as a logs account.
+ # If false, assume var.config_s3_bucket_name is an S3 bucket that already
+ # exists. We recommend setting this to true and setting is_logs_account to
+ # true on one of the accounts in var.child_accounts to use that account as a
+ # logs account where you aggregate all your AWS Config data. In case you want
+ # to disable the AWS Config module and the S3 bucket, you need to set both
+ # var.enable_config and config_should_create_s3_bucket to false.
config_should_create_s3_bucket = true
# Set to true to create an SNS topic in this account for sending AWS Config
# notifications. Set to false to assume the topic specified in
- # var.config_sns_topic_name already exists in another AWS account (e.g the logs
- # account).
+ # var.config_sns_topic_name already exists in another AWS account (e.g. the
+ # logs account).
config_should_create_sns_topic = false
- # Optional KMS key to use for each region for configuring default encryption for
- # the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of KMS
- # key). If null or the region key is missing, encryption will not be configured
- # for the SNS topic in that region.
+ # Optional KMS key to use for each region for configuring default encryption
+ # for the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of
+ # KMS key). If null or the region key is missing, encryption will not be
+ # configured for the SNS topic in that region.
config_sns_topic_kms_key_region_map = null
- # The name of the SNS Topic in where AWS Config notifications will be sent. Can be
- # in the same account or in another account.
+ # The name of the SNS Topic where AWS Config notifications will be sent.
+ # Can be in the same account or in another account.
config_sns_topic_name = "ConfigTopic"
- # A map of tags to apply to the S3 Bucket. The key is the tag name and the value
- # is the tag value.
+ # A map of tags to apply to the S3 Bucket. The key is the tag name and the
+ # value is the tag value.
config_tags = {}
# List of AWS account identifiers to exclude from org-level Config rules. Only
# used if var.config_create_account_rules is false (not recommended).
configrules_excluded_accounts = []
- # The maximum frequency with which AWS Config runs evaluations for the ´PERIODIC´
- # rules. See
- # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.h
- # ml#maximum_execution_frequency
+ # The maximum frequency with which AWS Config runs evaluations for the
+ # `PERIODIC` rules. See
+ # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.html#maximum_execution_frequency
configrules_maximum_execution_frequency = "TwentyFour_Hours"
# Set to true to create/configure AWS Organizations for the first time in this
- # account. If you already configured AWS Organizations in your account, set this
- # to false; alternatively, you could set it to true and run 'terraform import' to
- # import you existing Organization.
+ # account. If you already configured AWS Organizations in your account, set
+ # this to false; alternatively, you could set it to true and run 'terraform
+ # import' to import your existing Organization.
create_organization = true
- # The name of the IAM group that will grant access to all external AWS accounts in
- # var.iam_groups_for_cross_account_access.
+ # The name of the IAM group that will grant access to all external AWS
+ # accounts in var.iam_groups_for_cross_account_access.
cross_account_access_all_group_name = "_all-accounts"
# A list of AWS services for which the developers from the accounts in
- # var.allow_dev_access_from_other_account_arns will receive full permissions. See
- # https://goo.gl/ZyoHlz to find the IAM Service name. For example, to grant
- # developers access only to EC2 and Amazon Machine Learning, use the value
- # ["ec2","machinelearning"]. Do NOT add iam to the list of services, or that will
- # grant Developers de facto admin access.
+ # var.allow_dev_access_from_other_account_arns will receive full permissions.
+ # See https://goo.gl/ZyoHlz to find the IAM Service name. For example, to
+ # grant developers access only to EC2 and Amazon Machine Learning, use the
+ # value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
+ # that will grant Developers de facto admin access.
dev_permitted_services = []
- # If set to true (default), all new EBS volumes will have encryption enabled by
- # default
+ # If set to true (default), all new EBS volumes will have encryption enabled
+ # by default
ebs_enable_encryption = true
- # Optional map of region names to KMS keys to use for EBS volume encryption when
- # var.ebs_use_existing_kms_keys is enabled.
+ # Optional map of region names to KMS keys to use for EBS volume encryption
+ # when var.ebs_use_existing_kms_keys is enabled.
ebs_kms_key_arns = {}
# If set to true, the KMS Customer Managed Keys (CMK) specified in
- # var.ebs_kms_key_arns will be set as the default for EBS encryption. When false
- # (default), the AWS-managed aws/ebs key will be used.
+ # var.ebs_kms_key_arns will be set as the default for EBS encryption. When
+ # false (default), the AWS-managed aws/ebs key will be used.
ebs_use_existing_kms_keys = false
- # Set to true to enable CloudTrail in the root account. Set to false to disable
- # CloudTrail (note: all other CloudTrail variables will be ignored). In case you
- # want to disable the CloudTrail module and the S3 bucket, you need to set both
- # var.enable_cloudtrail and cloudtrail_should_create_s3_bucket to false.
+ # Set to true to enable CloudTrail in the root account. Set to false to
+ # disable CloudTrail (note: all other CloudTrail variables will be ignored).
+ # In case you want to disable the CloudTrail module and the S3 bucket, you
+ # need to set both var.enable_cloudtrail and
+ # cloudtrail_should_create_s3_bucket to false.
enable_cloudtrail = true
- # Enables S3 server access logging which sends detailed records for the requests
- # that are made to the bucket. Defaults to false.
+ # Enables S3 server access logging which sends detailed records for the
+ # requests that are made to the bucket. Defaults to false.
enable_cloudtrail_s3_server_access_logging = false
- # Set to true to enable AWS Config in the root account. Set to false to disable
- # AWS Config (note: all other AWS config variables will be ignored). In case you
- # want to disable the CloudTrail module and the S3 bucket, you need to set both
- # var.enable_cloudtrail and cloudtrail_should_create_s3_bucket to false.
+ # Set to true to enable AWS Config in the root account. Set to false to
+ # disable AWS Config (note: all other AWS config variables will be ignored).
+ # In case you want to disable the AWS Config module and the S3 bucket, you
+ # need to set both var.enable_config and
+ # config_should_create_s3_bucket to false.
enable_config = true
# Checks whether the EBS volumes that are in an attached state are encrypted.
@@ -1316,8 +1333,8 @@ Refer to the AWS docs on data event selection for more details on the
# When true, create an Open ID Connect Provider that GitHub actions can use to
# assume IAM roles in the account. Refer to
- # https://docs.github.com/en/actions/deployment/security-hardening-your-deployment
- # /configuring-openid-connect-in-amazon-web-services for more information.
+ # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services
+ # for more information.
enable_github_actions_access = false
# A feature flag to enable or disable this module.
@@ -1333,15 +1350,15 @@ Refer to the AWS docs on data event selection for more details on the
# requirements.
enable_iam_password_policy = true
- # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual Private
- # Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
+ # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual
+ # Private Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
enable_insecure_sg_rules = true
# Checks whether storage encryption is enabled for your RDS DB instances.
enable_rds_storage_encrypted = true
- # Checks whether users of your AWS account require a multi-factor authentication
- # (MFA) device to sign in with root credentials.
+ # Checks whether users of your AWS account require a multi-factor
+ # authentication (MFA) device to sign in with root credentials.
enable_root_account_mfa = true
# Checks that your Amazon S3 buckets do not allow public read access.
@@ -1355,15 +1372,16 @@ Refer to the AWS docs on data event selection for more details on the
encrypted_volumes_kms_id = null
# When destroying this user, destroy even if it has non-Terraform-managed IAM
- # access keys, login profile, or MFA devices. Without force_destroy a user with
- # non-Terraform-managed access keys and login profile will fail to be destroyed.
+ # access keys, login profile, or MFA devices. Without force_destroy a user
+ # with non-Terraform-managed access keys and login profile will fail to be
+ # destroyed.
force_destroy_users = false
- # When set, use the statically provided hardcoded list of thumbprints rather than
- # looking it up dynamically. This is useful if you want to trade reliability of
- # the OpenID Connect Provider across certificate renewals with a static list that
- # is obtained using a trustworthy mechanism, to mitigate potential damage from a
- # domain hijacking attack on GitHub domains.
+ # When set, use the statically provided hardcoded list of thumbprints rather
+ # than looking it up dynamically. This is useful if you want to trade
+ # reliability of the OpenID Connect Provider across certificate renewals with
+ # a static list that is obtained using a trustworthy mechanism, to mitigate
+ # potential damage from a domain hijacking attack on GitHub domains.
github_actions_openid_connect_provider_thumbprint_list = null
# Name of the Cloudwatch event rules.
@@ -1372,9 +1390,9 @@ Refer to the AWS docs on data event selection for more details on the
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
- # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must be
- # configured in Terraform to enable drift detection. Valid values for standalone
- # and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
+ # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must
+ # be configured in Terraform to enable drift detection. Valid values for
+ # standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
# Specifies a name for the created SNS topics where findings are published.
@@ -1387,42 +1405,42 @@ Refer to the AWS docs on data event selection for more details on the
# The name of the IAM Access Analyzer module
iam_access_analyzer_name = "baseline_root-iam_access_analyzer"
- # If set to ORGANIZATION, the analyzer will be scanning the current organization
- # and any policies that refer to linked resources such as S3, IAM, Lambda and SQS
- # policies.
+ # If set to ORGANIZATION, the analyzer will be scanning the current
+ # organization and any policies that refer to linked resources such as S3,
+ # IAM, Lambda and SQS policies.
iam_access_analyzer_type = "ORGANIZATION"
# A list of AWS services for which the developers IAM Group will receive full
# permissions. See https://goo.gl/ZyoHlz to find the IAM Service name. For
- # example, to grant developers access only to EC2 and Amazon Machine Learning, use
- # the value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
- # that will grant Developers de facto admin access. If you need to grant iam
- # privileges, just grant the user Full Access.
+ # example, to grant developers access only to EC2 and Amazon Machine Learning,
+ # use the value ["ec2","machinelearning"]. Do NOT add iam to the list of
+ # services, or that will grant Developers de facto admin access. If you need
+ # to grant iam privileges, just grant the user Full Access.
iam_group_developers_permitted_services = []
- # The list of names to be used for the IAM Group that enables its members to SSH
- # as a sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # The list of names to be used for the IAM Group that enables its members to
+ # SSH as a sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_sudo_users = []
# The name to be used for the IAM Group that enables its members to SSH as a
- # non-sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # non-sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_users = []
- # This variable is used to create groups that allow IAM users to assume roles in
- # your other AWS accounts. It should be a list of objects, where each object has
- # the fields 'group_name', which will be used as the name of the IAM group, and
- # 'iam_role_arns', which is a list of ARNs of IAM Roles that you can assume when
- # part of that group. For each entry in the list of objects, we will create an IAM
- # group that allows users to assume the given IAM role(s) in the other AWS
- # account. This allows you to define all your IAM users in one account (e.g. the
- # users account) and to grant them access to certain IAM roles in other accounts
- # (e.g. the stage, prod, audit accounts).
+ # This variable is used to create groups that allow IAM users to assume roles
+ # in your other AWS accounts. It should be a list of objects, where each
+ # object has the fields 'group_name', which will be used as the name of the
+ # IAM group, and 'iam_role_arns', which is a list of ARNs of IAM Roles that
+ # you can assume when part of that group. For each entry in the list of
+ # objects, we will create an IAM group that allows users to assume the given
+ # IAM role(s) in the other AWS account. This allows you to define all your IAM
+ # users in one account (e.g. the users account) and to grant them access to
+ # certain IAM roles in other accounts (e.g. the stage, prod, audit accounts).
iam_groups_for_cross_account_access = []
# Allow users to change their own password.
@@ -1455,32 +1473,32 @@ Refer to the AWS docs on data event selection for more details on the
# The tags to apply to all the IAM role resources.
iam_role_tags = {}
- # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '443,1020-1025'.
+ # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '443,1020-1025'.
insecure_sg_rules_authorized_tcp_ports = "443"
- # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '500,1020-1025'.
+ # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '500,1020-1025'.
insecure_sg_rules_authorized_udp_ports = null
- # Specifies whether CloudTrail will log only API calls in the current region or in
- # all regions. (true or false)
+ # Specifies whether CloudTrail will log only API calls in the current region
+ # or in all regions. (true or false)
is_multi_region_trail = true
# List of AWS service principal names for which you want to enable integration
- # with your organization. Must have `organizations_feature_set` set to ALL. See
- # https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_servic
- # s.html
+ # with your organization. Must have `organizations_feature_set` set to ALL.
+ # See
+ # https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html
organizations_aws_service_access_principals = ["cloudtrail.amazonaws.com","config-multiaccountsetup.amazonaws.com","config.amazonaws.com","access-analyzer.amazonaws.com"]
# If set to ALLOW, the new account enables IAM users to access account billing
- # information if they have the required permissions. If set to DENY, then only the
- # root user of the new account can access account billing information.
+ # information if they have the required permissions. If set to DENY, then only
+ # the root user of the new account can access account billing information.
organizations_default_iam_user_access_to_billing = "ALLOW"
- # The name of an IAM role that Organizations automatically preconfigures in the
- # new member account. This role trusts the master account, allowing users in the
- # master account to assume the role, as permitted by the master account
+ # The name of an IAM role that Organizations automatically preconfigures in
+ # the new member account. This role trusts the master account, allowing users
+ # in the master account to assume the role, as permitted by the master account
# administrator.
organizations_default_role_name = "OrganizationAccountAccessRole"
@@ -1488,8 +1506,7 @@ Refer to the AWS docs on data event selection for more details on the
organizations_default_tags = {}
# List of Organizations policy types to enable in the Organization Root. See
- # https://docs.aws.amazon.com/organizations/latest/APIReference/API_EnablePolicyTy
- # e.html
+ # https://docs.aws.amazon.com/organizations/latest/APIReference/API_EnablePolicyType.html
organizations_enabled_policy_types = ["SERVICE_CONTROL_POLICY"]
# Specify `ALL` or `CONSOLIDATED_BILLING`.
@@ -1503,67 +1520,68 @@ Refer to the AWS docs on data event selection for more details on the
# storage encryption config rule.
rds_storage_encrypted_kms_id = null
- # Should we create the IAM Group for auto-deploy? Allows automated deployment by
- # granting the permissions specified in var.auto_deploy_permissions. (true or
- # false)
+ # Should we create the IAM Group for auto-deploy? Allows automated deployment
+ # by granting the permissions specified in var.auto_deploy_permissions. (true
+ # or false)
should_create_iam_group_auto_deploy = false
- # Should we create the IAM Group for billing? Allows read-write access to billing
- # features only. (true or false)
+ # Should we create the IAM Group for billing? Allows read-write access to
+ # billing features only. (true or false)
should_create_iam_group_billing = true
- # Should we create the IAM Group for developers? The permissions of that group are
- # specified via var.iam_group_developers_permitted_services. (true or false)
+ # Should we create the IAM Group for developers? The permissions of that group
+ # are specified via var.iam_group_developers_permitted_services. (true or
+ # false)
should_create_iam_group_developers = false
- # Should we create the IAM Group for full access? Allows full access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for full access? Allows full access to all
+ # AWS resources. (true or false)
should_create_iam_group_full_access = true
# Should we create the IAM Group for logs? Allows read access to logs in
# CloudTrail, AWS Config, and CloudWatch. If var.cloudtrail_kms_key_arn is
- # specified, will also be given permissions to decrypt with the KMS CMK that is
- # used to encrypt CloudTrail logs. (true or false)
+ # specified, will also be given permissions to decrypt with the KMS CMK that
+ # is used to encrypt CloudTrail logs. (true or false)
should_create_iam_group_logs = false
- # Should we create the IAM Group for read-only? Allows read-only access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for read-only? Allows read-only access to all
+ # AWS resources. (true or false)
should_create_iam_group_read_only = false
- # Should we create the IAM Group for support? Allows access to AWS support. (true
- # or false)
+ # Should we create the IAM Group for support? Allows access to AWS support.
+ # (true or false)
should_create_iam_group_support = true
- # Should we create the IAM Group for use-existing-iam-roles? Allow launching AWS
- # resources with existing IAM Roles, but no ability to create new IAM Roles. (true
- # or false)
+ # Should we create the IAM Group for use-existing-iam-roles? Allow launching
+ # AWS resources with existing IAM Roles, but no ability to create new IAM
+ # Roles. (true or false)
should_create_iam_group_use_existing_iam_roles = false
- # Should we create the IAM Group for user self-management? Allows users to manage
- # their own IAM user accounts, but not other IAM users. (true or false)
+ # Should we create the IAM Group for user self-management? Allows users to
+ # manage their own IAM user accounts, but not other IAM users. (true or false)
should_create_iam_group_user_self_mgmt = false
- # Should we require that all IAM Users use Multi-Factor Authentication for both
- # AWS API calls and the AWS Web Console? (true or false)
+ # Should we require that all IAM Users use Multi-Factor Authentication for
+ # both AWS API calls and the AWS Web Console? (true or false)
should_require_mfa = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# A map of users to create. The keys are the user names and the values are an
# object with the optional keys 'groups' (a list of IAM groups to add the user
- # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a base-64
- # encoded PGP public key, or a keybase username in the form keybase:username, used
- # to encrypt the user's credentials; required if create_login_profile or
- # create_access_keys is true), 'create_login_profile' (if set to true, create a
- # password to login to the AWS Web Console), 'create_access_keys' (if set to true,
- # create access keys for the user), 'path' (the path), and 'permissions_boundary'
- # (the ARN of the policy that is used to set the permissions boundary for the
- # user).
+ # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a
+ # base-64 encoded PGP public key, or a keybase username in the form
+ # keybase:username, used to encrypt the user's credentials; required if
+ # create_login_profile or create_access_keys is true), 'create_login_profile'
+ # (if set to true, create a password to login to the AWS Web Console),
+ # 'create_access_keys' (if set to true, create access keys for the user),
+ # 'path' (the path), and 'permissions_boundary' (the ARN of the policy that is
+ # used to set the permissions boundary for the user).
users = {}
}
@@ -3744,11 +3762,11 @@ A map of user name to that user's AWS Web Console password, encrypted with that
diff --git a/docs/reference/services/landing-zone/aws-security-account-baseline-wrapper.md b/docs/reference/services/landing-zone/aws-security-account-baseline-wrapper.md
index 76cbca2580..4602d66837 100644
--- a/docs/reference/services/landing-zone/aws-security-account-baseline-wrapper.md
+++ b/docs/reference/services/landing-zone/aws-security-account-baseline-wrapper.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Account Baseline for security account
-View Source
+View Source
Release Notes
@@ -59,13 +59,13 @@ If you’ve never used the Service Catalog before, make sure to read
* Learn more about each individual module, click the link in the [Features](#features) section.
* [How to configure a production-grade AWS account structure](https://docs.gruntwork.io/guides/build-it-yourself/landing-zone/)
-* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/landingzone/account-baseline-root/core-concepts.md#how-to-use-multi-region-services)
+* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/landingzone/account-baseline-root/core-concepts.md#how-to-use-multi-region-services)
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -73,7 +73,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing/landingzone): The
+* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing/landingzone): The
`examples/for-learning-and-testing/landingzone` folder contains standalone sample code optimized for learning,
experimenting, and testing (but not direct production usage).
@@ -81,7 +81,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -101,7 +101,7 @@ If you want to deploy this repo in production, check out the following resources
module "account_baseline_security" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-security?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-security?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -115,85 +115,87 @@ module "account_baseline_security" {
# GuardDuty.
aws_region =
- # Creates resources in the specified regions. The best practice is to enable AWS
- # Config in all enabled regions in your AWS account. This variable must NOT be set
- # to null or empty. Otherwise, we won't know which regions to use and authenticate
- # to, and may use some not enabled in your AWS account (e.g., GovCloud, China,
- # etc). To get the list of regions enabled in your AWS account, you can use the
- # AWS CLI: aws ec2 describe-regions.
+ # Creates resources in the specified regions. The best practice is to enable
+ # AWS Config in all enabled regions in your AWS account. This variable must
+ # NOT be set to null or empty. Otherwise, we won't know which regions to use
+ # and authenticate to, and may use some not enabled in your AWS account (e.g.,
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions.
config_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable EBS
- # Encryption in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # EBS Encryption in all enabled regions in your AWS account. This variable
+ # must NOT be set to null or empty. Otherwise, we won't know which regions to
+ # use and authenticate to, and may use some not enabled in your AWS account
+ # (e.g., GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
ebs_opt_in_regions =
# Creates resources in the specified regions. The best practice is to enable
- # GuardDuty in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
+ # GuardDuty in all enabled regions in your AWS account. This variable must NOT
+ # be set to null or empty. Otherwise, we won't know which regions to use and
# authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
guardduty_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable IAM
- # Access Analyzer in all enabled regions in your AWS account. This variable must
- # NOT be set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # IAM Access Analyzer in all enabled regions in your AWS account. This
+ # variable must NOT be set to null or empty. Otherwise, we won't know which
+ # regions to use and authenticate to, and may use some not enabled in your AWS
+ # account (e.g., GovCloud, China, etc). To get the list of regions enabled in
+ # your AWS account, you can use the AWS CLI: aws ec2 describe-regions. The
+ # value provided for global_recorder_region must be in this list.
iam_access_analyzer_opt_in_regions =
# Creates resources in the specified regions. This variable must NOT be set to
- # null or empty. Otherwise, we won't know which regions to use and authenticate
- # to, and may use some not enabled in your AWS account (e.g., GovCloud, China,
- # etc). To get the list of regions enabled in your AWS account, you can use the
- # AWS CLI: aws ec2 describe-regions. The value provided for global_recorder_region
- # must be in this list.
+ # null or empty. Otherwise, we won't know which regions to use and
+ # authenticate to, and may use some not enabled in your AWS account (e.g.,
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
kms_cmk_opt_in_regions =
- # The name used to prefix AWS Config and Cloudtrail resources, including the S3
- # bucket names and SNS topics used for each.
+ # The name used to prefix AWS Config and Cloudtrail resources, including the
+ # S3 bucket names and SNS topics used for each.
name_prefix =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Map of additional managed rules to add. The key is the name of the rule (e.g.
- # ´acm-certificate-expiration-check´) and the value is an object specifying the
- # rule details
+ # Map of additional managed rules to add. The key is the name of the rule
+ # (e.g. ´acm-certificate-expiration-check´) and the value is an object
+ # specifying the rule details
additional_config_rules = {}
- # Map of github repositories to the list of branches that are allowed to assume
- # the IAM role. The repository should be encoded as org/repo-name (e.g.,
- # gruntwork-io/terrraform-aws-ci). Allows GitHub Actions to assume the auto deploy
- # IAM role using an OpenID Connect Provider for the given repositories. Refer to
- # the docs for github-actions-iam-role for more information. Note that this is
- # mutually exclusive with var.allow_auto_deploy_from_other_account_arns. Only used
- # if var.enable_github_actions_access is true.
+ # Map of github repositories to the list of branches that are allowed to
+ # assume the IAM role. The repository should be encoded as org/repo-name
+ # (e.g., gruntwork-io/terraform-aws-ci). Allows GitHub Actions to assume the
+ # auto deploy IAM role using an OpenID Connect Provider for the given
+ # repositories. Refer to the docs for github-actions-iam-role for more
+ # information. Note that this is mutually exclusive with
+ # var.allow_auto_deploy_from_other_account_arns. Only used if
+ # var.enable_github_actions_access is true.
allow_auto_deploy_from_github_actions_for_sources = {}
- # A list of IAM ARNs from other AWS accounts that will be allowed to assume the
- # auto deploy IAM role that has the permissions in var.auto_deploy_permissions.
+ # A list of IAM ARNs from other AWS accounts that will be allowed to assume
+ # the auto deploy IAM role that has the permissions in
+ # var.auto_deploy_permissions.
allow_auto_deploy_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_auto_deploy_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the billing info for this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the billing info for this account.
allow_billing_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_billing_access_iam_role_permissions_boundary = null
# If true, an IAM Policy that grants access to CloudTrail will be honored. If
@@ -201,156 +203,160 @@ module "account_baseline_security" {
# CloudTrail and any IAM Policy grants will be ignored. (true or false)
allow_cloudtrail_access_with_iam = true
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the services in this account specified in
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the services in this account specified in
# var.dev_permitted_services.
allow_dev_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_dev_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to this account.
allow_full_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_full_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed access to the
- # logs in CloudTrail, AWS Config, and CloudWatch for this account. Will also be
- # given permissions to decrypt with the KMS CMK that is used to encrypt CloudTrail
- # logs.
+ # A list of IAM ARNs from other AWS accounts that will be allowed access to
+ # the logs in CloudTrail, AWS Config, and CloudWatch for this account. Will
+ # also be given permissions to decrypt with the KMS CMK that is used to
+ # encrypt CloudTrail logs.
allow_logs_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed read-only access
- # to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read-only
+ # access to this account.
allow_read_only_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_read_only_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed read access to
- # IAM groups and publish SSH keys. This is used for ssh-grunt.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read access
+ # to IAM groups and publish SSH keys. This is used for ssh-grunt.
allow_ssh_grunt_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed support access
- # (AWSSupportAccess) to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed support
+ # access (AWSSupportAccess) to this account.
allow_support_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_support_access_iam_role_permissions_boundary = null
- # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group for
- # doing automated deployments. NOTE: If var.should_create_iam_group_auto_deploy is
- # true, the list must have at least one element (e.g. '*').
+ # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group
+ # for doing automated deployments. NOTE: If
+ # var.should_create_iam_group_auto_deploy is true, the list must have at least
+ # one element (e.g. '*').
auto_deploy_permissions = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
aws_config_iam_role_permissions_boundary = null
# Whether or not to allow kms:DescribeKey to external AWS accounts with write
- # access to the CloudTrail bucket. This is useful during deployment so that you
- # don't have to pass around the KMS key ARN.
+ # access to the CloudTrail bucket. This is useful during deployment so that
+ # you don't have to pass around the KMS key ARN.
cloudtrail_allow_kms_describe_key_to_external_aws_accounts = false
- # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs to.
- # This log group exists in the current account. Set this value to `null` to avoid
- # publishing the trail logs to the logs group. The recommended configuration for
- # CloudTrail is (a) for each child account to aggregate its logs in an S3 bucket
- # in a single central account, such as a logs account and (b) to also store 14
- # days work of logs in CloudWatch in the child account itself for local debugging.
+ # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs
+ # to. This log group exists in the current account. Set this value to `null`
+ # to avoid publishing the trail logs to the logs group. The recommended
+ # configuration for CloudTrail is (a) for each child account to aggregate its
+ # logs in an S3 bucket in a single central account, such as a logs account and
+ # (b) to also store 14 days' worth of logs in CloudWatch in the child account
+ # itself for local debugging.
cloudtrail_cloudwatch_logs_group_name = "cloudtrail-logs"
# If true, logging of data events will be enabled.
cloudtrail_data_logging_enabled = false
- # Specify if you want your event selector to include management events for your
- # trail.
+ # Specify if you want your event selector to include management events for
+ # your trail.
cloudtrail_data_logging_include_management_events = true
- # Specify if you want your trail to log read-only events, write-only events, or
- # all. Possible values are: ReadOnly, WriteOnly, All.
+ # Specify if you want your trail to log read-only events, write-only events,
+ # or all. Possible values are: ReadOnly, WriteOnly, All.
cloudtrail_data_logging_read_write_type = "All"
- # Data resources for which to log data events. This should be a map, where each
- # key is a data resource type, and each value is a list of data resource values.
- # Possible values for data resource types are: AWS::S3::Object,
- # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource' block
- # within the 'event_selector' block of the 'aws_cloudtrail' resource for context:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # trail#data_resource.
+ # Data resources for which to log data events. This should be a map, where
+ # each key is a data resource type, and each value is a list of data resource
+ # values. Possible values for data resource types are: AWS::S3::Object,
+ # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource'
+ # block within the 'event_selector' block of the 'aws_cloudtrail' resource for
+ # context:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#data_resource.
cloudtrail_data_logging_resources = {}
- # A list of external AWS accounts that should be given write access for CloudTrail
- # logs to this S3 bucket. This is useful when aggregating CloudTrail logs for
- # multiple AWS accounts in one common S3 bucket.
+ # A list of external AWS accounts that should be given write access for
+ # CloudTrail logs to this S3 bucket. This is useful when aggregating
+ # CloudTrail logs for multiple AWS accounts in one common S3 bucket.
cloudtrail_external_aws_account_ids_with_write_access = []
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
cloudtrail_force_destroy = false
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
cloudtrail_iam_role_permissions_boundary = null
# All CloudTrail Logs will be encrypted with a KMS Key (a Customer Master Key)
- # that governs access to write API calls older than 7 days and all read API calls.
- # The IAM Users specified in this list will have rights to change who can access
- # this extended log data.
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. The IAM Users specified in this list will have rights to change who
+ # can access this extended log data.
cloudtrail_kms_key_administrator_iam_arns = []
- # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key) that
- # governs access to write API calls older than 7 days and all read API calls. If
- # that CMK already exists, set this to the ARN of that CMK. Otherwise, set this to
- # null, and a new CMK will be created. We recommend setting this to the ARN of a
- # CMK that already exists in a separate logs account.
+ # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. If that CMK already exists, set this to the ARN of that CMK.
+ # Otherwise, set this to null, and a new CMK will be created. We recommend
+ # setting this to the ARN of a CMK that already exists in a separate logs
+ # account.
cloudtrail_kms_key_arn = null
- # If the kms_key_arn provided is an alias or alias ARN, then this must be set to
- # true so that the module will exchange the alias for a CMK ARN. Setting this to
- # true and using aliases requires
- # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be true
- # for multi-account scenarios.
+ # If the kms_key_arn provided is an alias or alias ARN, then this must be set
+ # to true so that the module will exchange the alias for a CMK ARN. Setting
+ # this to true and using aliases requires
+ # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be
+ # true for multi-account scenarios.
cloudtrail_kms_key_arn_is_alias = false
- # Additional service principals beyond CloudTrail that should have access to the
- # KMS key used to encrypt the logs. This is useful for granting access to the logs
- # for the purposes of constructing metric filters.
+ # Additional service principals beyond CloudTrail that should have access to
+ # the KMS key used to encrypt the logs. This is useful for granting access to
+ # the logs for the purposes of constructing metric filters.
cloudtrail_kms_key_service_principals = []
# All CloudTrail Logs will be encrypted with a KMS Key (a Customer Master Key)
- # that governs access to write API calls older than 7 days and all read API calls.
- # The IAM Users specified in this list will have read-only access to this extended
- # log data.
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. The IAM Users specified in this list will have read-only access to
+ # this extended log data.
cloudtrail_kms_key_user_iam_arns = []
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
cloudtrail_num_days_after_which_archive_log_data = 30
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
cloudtrail_num_days_after_which_delete_log_data = 365
- # After this number of days, logs stored in CloudWatch will be deleted. Possible
- # values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827,
- # 3653, and 0 (default). When set to 0, logs will be retained indefinitely.
+ # After this number of days, logs stored in CloudWatch will be deleted.
+ # Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400,
+ # 545, 731, 1827, 3653, and 0 (default). When set to 0, logs will be retained
+ # indefinitely.
cloudtrail_num_days_to_retain_cloudwatch_logs = 0
# Set to false to create an S3 bucket of name var.cloudtrail_s3_bucket_name in
# this account for storing CloudTrail logs. Set to true to assume the bucket
# specified in var.cloudtrail_s3_bucket_name already exists in another AWS
# account. We recommend setting this to true and setting
- # var.cloudtrail_s3_bucket_name to the name of a bucket that already exists in a
- # separate logs account.
+ # var.cloudtrail_s3_bucket_name to the name of a bucket that already exists in
+ # a separate logs account.
cloudtrail_s3_bucket_already_exists = false
# The name of the S3 Bucket where CloudTrail logs will be stored. If value is
@@ -358,148 +364,151 @@ module "account_baseline_security" {
cloudtrail_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage Cloudtrail data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store Cloudtrail data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README from
+ # the terraform-aws-security/private-s3-bucket module.
cloudtrail_s3_mfa_delete = false
# Tags to apply to the CloudTrail resources.
cloudtrail_tags = {}
# Set to true to send the AWS Config data to another account (e.g., a logs
- # account) for aggregation purposes. You must set the ID of that other account via
- # the config_central_account_id variable. This redundant variable has to exist
- # because Terraform does not allow computed data in count and for_each parameters
- # and var.config_central_account_id may be computed if its the ID of a
- # newly-created AWS account.
+ # account) for aggregation purposes. You must set the ID of that other account
+ # via the config_central_account_id variable. This redundant variable has to
+ # exist because Terraform does not allow computed data in count and for_each
+ # parameters and var.config_central_account_id may be computed if it's the ID
+ # of a newly-created AWS account.
config_aggregate_config_data_in_external_account = false
# If the S3 bucket and SNS topics used for AWS Config live in a different AWS
- # account, set this variable to the ID of that account. If the S3 bucket and SNS
- # topics live in this account, set this variable to null. We recommend setting
- # this to the ID of a separate logs account. Only used if
+ # account, set this variable to the ID of that account. If the S3 bucket and
+ # SNS topics live in this account, set this variable to null. We recommend
+ # setting this to the ID of a separate logs account. Only used if
# var.config_aggregate_config_data_in_external_account is true.
config_central_account_id = null
- # Set to true to create AWS Config rules directly in this account. Set false to
- # not create any Config rules in this account (i.e., if you created the rules at
- # the organization level already). We recommend setting this to true to use
- # account-level rules because org-level rules create a chicken-and-egg problem
- # with creating new accounts.
+ # Set to true to create AWS Config rules directly in this account. Set false
+ # to not create any Config rules in this account (i.e., if you created the
+ # rules at the organization level already). We recommend setting this to true
+ # to use account-level rules because org-level rules create a chicken-and-egg
+ # problem with creating new accounts.
config_create_account_rules = true
# Optional KMS key to use for encrypting S3 objects on the AWS Config delivery
- # channel for an externally managed S3 bucket. This must belong to the same region
- # as the destination S3 bucket. If null, AWS Config will default to encrypting the
- # delivered data with AES-256 encryption. Only used if var.should_create_s3_bucket
- # is false - otherwise, var.kms_key_arn is used.
+ # channel for an externally managed S3 bucket. This must belong to the same
+ # region as the destination S3 bucket. If null, AWS Config will default to
+ # encrypting the delivered data with AES-256 encryption. Only used if
+ # var.should_create_s3_bucket is false - otherwise, var.kms_key_arn is used.
config_delivery_channel_kms_key_arn = null
- # Same as var.config_delivery_channel_kms_key_arn, except the value is a name of a
- # KMS key configured with var.kms_customer_master_keys. The module created KMS key
- # for the delivery region (indexed by the name) will be used. Note that if both
- # var.config_delivery_channel_kms_key_arn and
+ # Same as var.config_delivery_channel_kms_key_arn, except the value is a name
+ # of a KMS key configured with var.kms_customer_master_keys. The module
+ # created KMS key for the delivery region (indexed by the name) will be used.
+ # Note that if both var.config_delivery_channel_kms_key_arn and
# var.config_delivery_channel_kms_key_by_name are configured, the key in
# var.config_delivery_channel_kms_key_arn will always be used.
config_delivery_channel_kms_key_by_name = null
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
config_force_destroy = false
- # Provide a list of AWS account IDs that will send Config data to this account.
- # This is useful if your aggregating config data in this account for other
- # accounts.
+ # Provide a list of AWS account IDs that will send Config data to this
+ # account. This is useful if you're aggregating config data in this account for
+ # other accounts.
config_linked_accounts = []
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
config_num_days_after_which_archive_log_data = 365
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Optional KMS key to use for encrypting S3 objects on the AWS Config bucket, when
- # the S3 bucket is created within this module (var.config_should_create_s3_bucket
- # is true). For encrypting S3 objects on delivery for an externally managed S3
- # bucket, refer to the var.config_delivery_channel_kms_key_arn input variable. If
- # null, data in S3 will be encrypted using the default aws/s3 key. If provided,
- # the key policy of the provided key must permit the IAM role used by AWS Config.
- # See https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that
+ # Optional KMS key to use for encrypting S3 objects on the AWS Config bucket,
+ # when the S3 bucket is created within this module
+ # (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
+ # delivery for an externally managed S3 bucket, refer to the
+ # var.config_delivery_channel_kms_key_arn input variable. If null, data in S3
+ # will be encrypted using the default aws/s3 key. If provided, the key policy
+ # of the provided key must permit the IAM role used by AWS Config. See
+ # https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that
# the KMS key must reside in the global recorder region (as configured by
# var.aws_region).
config_s3_bucket_kms_key_arn = null
- # Same as var.config_s3_bucket_kms_key_arn, except the value is a name of a KMS
- # key configured with var.kms_customer_master_keys. The module created KMS key for
- # the global recorder region (indexed by the name) will be used. Note that if both
- # var.config_s3_bucket_kms_key_arn and var.config_s3_bucket_kms_key_by_name are
- # configured, the key in var.config_s3_bucket_kms_key_arn will always be used.
+ # Same as var.config_s3_bucket_kms_key_arn, except the value is a name of a
+ # KMS key configured with var.kms_customer_master_keys. The module created KMS
+ # key for the global recorder region (indexed by the name) will be used. Note
+ # that if both var.config_s3_bucket_kms_key_arn and
+ # var.config_s3_bucket_kms_key_by_name are configured, the key in
+ # var.config_s3_bucket_kms_key_arn will always be used.
config_s3_bucket_kms_key_by_name = null
- # The name of the S3 Bucket where CloudTrail logs will be stored. This could be a
- # bucket in this AWS account or the name of a bucket in another AWS account where
- # logs should be sent. We recommend setting this to the name of a bucket in a
- # separate logs account.
+ # The name of the S3 Bucket where CloudTrail logs will be stored. This could
+ # be a bucket in this AWS account or the name of a bucket in another AWS
+ # account where logs should be sent. We recommend setting this to the name of
+ # a bucket in a separate logs account.
config_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage AWS Config data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store AWS Config data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README from
+ # the terraform-aws-security/private-s3-bucket module.
config_s3_mfa_delete = false
# Set to true to create an S3 bucket of name var.config_s3_bucket_name in this
- # account for storing AWS Config data. Set to false to assume the bucket specified
- # in var.config_s3_bucket_name already exists in another AWS account. We recommend
- # setting this to false and setting var.config_s3_bucket_name to the name off an
- # S3 bucket that already exists in a separate logs account.
+ # account for storing AWS Config data. Set to false to assume the bucket
+ # specified in var.config_s3_bucket_name already exists in another AWS
+ # account. We recommend setting this to false and setting
+ # var.config_s3_bucket_name to the name of an S3 bucket that already exists
+ # in a separate logs account.
config_should_create_s3_bucket = false
# Set to true to create an SNS topic in this account for sending AWS Config
- # notifications (e.g., if this is the logs account). Set to false to assume the
- # topic specified in var.config_sns_topic_name already exists in another AWS
- # account (e.g., if this is the stage or prod account and
+ # notifications (e.g., if this is the logs account). Set to false to assume
+ # the topic specified in var.config_sns_topic_name already exists in another
+ # AWS account (e.g., if this is the stage or prod account and
# var.config_sns_topic_name is the name of an SNS topic in the logs account).
config_should_create_sns_topic = false
- # Same as var.config_sns_topic_kms_key_region_map, except the value is a name of a
- # KMS key configured with var.kms_customer_master_keys. The module created KMS key
- # for each region (indexed by the name) will be used. Note that if an entry exists
- # for a region in both var.config_sns_topic_kms_key_region_map and
+ # Same as var.config_sns_topic_kms_key_region_map, except the value is a name
+ # of a KMS key configured with var.kms_customer_master_keys. The module
+ # created KMS key for each region (indexed by the name) will be used. Note
+ # that if an entry exists for a region in both
+ # var.config_sns_topic_kms_key_region_map and
# var.config_sns_topic_kms_key_by_name_region_map, then the key in
# var.config_sns_topic_kms_key_region_map will always be used.
config_sns_topic_kms_key_by_name_region_map = null
- # Optional KMS key to use for each region for configuring default encryption for
- # the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of KMS
- # key). If null or the region key is missing, encryption will not be configured
- # for the SNS topic in that region.
+ # Optional KMS key to use for each region for configuring default encryption
+ # for the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of
+ # KMS key). If null or the region key is missing, encryption will not be
+ # configured for the SNS topic in that region.
config_sns_topic_kms_key_region_map = null
- # The name of the SNS Topic in where AWS Config notifications will be sent. Can be
- # in the same account or in another account.
+ # The name of the SNS Topic where AWS Config notifications will be sent.
+ # Can be in the same account or in another account.
config_sns_topic_name = "ConfigTopic"
- # A map of tags to apply to the S3 Bucket. The key is the tag name and the value
- # is the tag value.
+ # A map of tags to apply to the S3 Bucket. The key is the tag name and the
+ # value is the tag value.
config_tags = {}
- # The maximum frequency with which AWS Config runs evaluations for the ´PERIODIC´
- # rules. See
- # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.h
- # ml#maximum_execution_frequency
+ # The maximum frequency with which AWS Config runs evaluations for the
+ # ´PERIODIC´ rules. See
+ # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.html#maximum_execution_frequency
configrules_maximum_execution_frequency = "TwentyFour_Hours"
- # The name of the IAM group that will grant access to all external AWS accounts in
- # var.iam_groups_for_cross_account_access.
+ # The name of the IAM group that will grant access to all external AWS
+ # accounts in var.iam_groups_for_cross_account_access.
cross_account_access_all_group_name = "_all-accounts"
# A custom name to use for the Cloudtrail Trail. If null, defaults to the
@@ -507,15 +516,15 @@ module "account_baseline_security" {
custom_cloudtrail_trail_name = null
# A list of AWS services for which the developers from the accounts in
- # var.allow_dev_access_from_other_account_arns will receive full permissions. See
- # https://goo.gl/ZyoHlz to find the IAM Service name. For example, to grant
- # developers access only to EC2 and Amazon Machine Learning, use the value
- # ["ec2","machinelearning"]. Do NOT add iam to the list of services, or that will
- # grant Developers de facto admin access.
+ # var.allow_dev_access_from_other_account_arns will receive full permissions.
+ # See https://goo.gl/ZyoHlz to find the IAM Service name. For example, to
+ # grant developers access only to EC2 and Amazon Machine Learning, use the
+ # value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
+ # that will grant Developers de facto admin access.
dev_permitted_services = []
- # If set to true (default), all new EBS volumes will have encryption enabled by
- # default
+ # If set to true (default), all new EBS volumes will have encryption enabled
+ # by default
ebs_enable_encryption = true
# The name of the KMS CMK to use by default for encrypting EBS volumes, if
@@ -524,15 +533,15 @@ module "account_baseline_security" {
ebs_kms_key_name = ""
# If set to true, the KMS Customer Managed Keys (CMK) with the name in
- # var.ebs_kms_key_name will be set as the default for EBS encryption. When false
- # (default), the AWS-managed aws/ebs key will be used.
+ # var.ebs_kms_key_name will be set as the default for EBS encryption. When
+ # false (default), the AWS-managed aws/ebs key will be used.
ebs_use_existing_kms_keys = false
- # Set to true (default) to enable CloudTrail in the security account. Set to false
- # to disable CloudTrail (note: all other CloudTrail variables will be ignored).
- # Note that if you have enabled organization trail in the root (parent) account,
- # you should set this to false; the organization trail will enable CloudTrail on
- # child accounts by default.
+ # Set to true (default) to enable CloudTrail in the security account. Set to
+ # false to disable CloudTrail (note: all other CloudTrail variables will be
+ # ignored). Note that if you have enabled organization trail in the root
+ # (parent) account, you should set this to false; the organization trail will
+ # enable CloudTrail on child accounts by default.
enable_cloudtrail = true
# Set to true to enable AWS Config in the security account. Set to false to
@@ -544,15 +553,15 @@ module "account_baseline_security" {
# When true, create an Open ID Connect Provider that GitHub actions can use to
# assume IAM roles in the account. Refer to
- # https://docs.github.com/en/actions/deployment/security-hardening-your-deployment
- # /configuring-openid-connect-in-amazon-web-services for more information.
+ # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services
+ # for more information.
enable_github_actions_access = false
- # Set to true (default) to enable GuardDuty in this app account. Set to false to
- # disable GuardDuty (note: all other GuardDuty variables will be ignored). Note
- # that if you have enabled organization level GuardDuty in the root (parent)
- # account, you should set this to false; the organization GuardDuty will enable
- # GuardDuty on child accounts by default.
+ # Set to true (default) to enable GuardDuty in this app account. Set to false
+ # to disable GuardDuty (note: all other GuardDuty variables will be ignored).
+ # Note that if you have enabled organization level GuardDuty in the root
+ # (parent) account, you should set this to false; the organization GuardDuty
+ # will enable GuardDuty on child accounts by default.
enable_guardduty = true
# A feature flag to enable or disable this module.
@@ -568,15 +577,15 @@ module "account_baseline_security" {
# requirements.
enable_iam_password_policy = true
- # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual Private
- # Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
+ # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual
+ # Private Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
enable_insecure_sg_rules = true
# Checks whether storage encryption is enabled for your RDS DB instances.
enable_rds_storage_encrypted = true
- # Checks whether users of your AWS account require a multi-factor authentication
- # (MFA) device to sign in with root credentials.
+ # Checks whether users of your AWS account require a multi-factor
+ # authentication (MFA) device to sign in with root credentials.
enable_root_account_mfa = true
# Checks that your Amazon S3 buckets do not allow public read access.
@@ -590,15 +599,16 @@ module "account_baseline_security" {
encrypted_volumes_kms_id = null
# When destroying this user, destroy even if it has non-Terraform-managed IAM
- # access keys, login profile, or MFA devices. Without force_destroy a user with
- # non-Terraform-managed access keys and login profile will fail to be destroyed.
+ # access keys, login profile, or MFA devices. Without force_destroy a user
+ # with non-Terraform-managed access keys and login profile will fail to be
+ # destroyed.
force_destroy_users = false
- # When set, use the statically provided hardcoded list of thumbprints rather than
- # looking it up dynamically. This is useful if you want to trade reliability of
- # the OpenID Connect Provider across certificate renewals with a static list that
- # is obtained using a trustworthy mechanism, to mitigate potential damage from a
- # domain hijacking attack on GitHub domains.
+ # When set, use the statically provided hardcoded list of thumbprints rather
+ # than looking it up dynamically. This is useful if you want to trade
+ # reliability of the OpenID Connect Provider across certificate renewals with
+ # a static list that is obtained using a trustworthy mechanism, to mitigate
+ # potential damage from a domain hijacking attack on GitHub domains.
github_actions_openid_connect_provider_thumbprint_list = null
# Name of the Cloudwatch event rules.
@@ -607,9 +617,9 @@ module "account_baseline_security" {
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
- # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must be
- # configured in Terraform to enable drift detection. Valid values for standalone
- # and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
+ # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must
+ # be configured in Terraform to enable drift detection. Valid values for
+ # standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
# Specifies a name for the created SNS topics where findings are published.
@@ -622,17 +632,17 @@ module "account_baseline_security" {
# The name of the IAM Access Analyzer module
iam_access_analyzer_name = "baseline_security-iam_access_analyzer"
- # If set to ACCOUNT, the analyzer will only be scanning the current AWS account
- # it's in. If set to ORGANIZATION - will scan the organization AWS account and the
- # child accounts.
+ # If set to ACCOUNT, the analyzer will only be scanning the current AWS
+ # account it's in. If set to ORGANIZATION - will scan the organization AWS
+ # account and the child accounts.
iam_access_analyzer_type = "ACCOUNT"
# A list of AWS services for which the developers IAM Group will receive full
# permissions. See https://goo.gl/ZyoHlz to find the IAM Service name. For
- # example, to grant developers access only to EC2 and Amazon Machine Learning, use
- # the value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
- # that will grant Developers de facto admin access. If you need to grant iam
- # privileges, just grant the user Full Access.
+ # example, to grant developers access only to EC2 and Amazon Machine Learning,
+ # use the value ["ec2","machinelearning"]. Do NOT add iam to the list of
+ # services, or that will grant Developers de facto admin access. If you need
+ # to grant iam privileges, just grant the user Full Access.
iam_group_developers_permitted_services = []
# The name of the IAM Group that allows automated deployment by graning the
@@ -643,8 +653,8 @@ module "account_baseline_security" {
# billing features in AWS.
iam_group_name_billing = "billing"
- # The name to be used for the IAM Group that grants IAM Users a reasonable set of
- # permissions for developers.
+ # The name to be used for the IAM Group that grants IAM Users a reasonable set
+ # of permissions for developers.
iam_group_name_developers = "developers"
# The name to be used for the IAM Group that grants full access to all AWS
@@ -655,49 +665,49 @@ module "account_baseline_security" {
# Effectively grants administrator access.
iam_group_name_iam_admin = "iam-admin"
- # The name to be used for the IAM Group that grants IAM Users the permissions to
- # manage their own IAM User account.
+ # The name to be used for the IAM Group that grants IAM Users the permissions
+ # to manage their own IAM User account.
iam_group_name_iam_user_self_mgmt = "iam-user-self-mgmt"
- # The name to be used for the IAM Group that grants read access to CloudTrail, AWS
- # Config, and CloudWatch in AWS.
+ # The name to be used for the IAM Group that grants read access to CloudTrail,
+ # AWS Config, and CloudWatch in AWS.
iam_group_name_logs = "logs"
- # The name to be used for the IAM Group that grants read-only access to all AWS
- # resources.
+ # The name to be used for the IAM Group that grants read-only access to all
+ # AWS resources.
iam_group_name_read_only = "read-only"
# The name of the IAM Group that allows access to AWS Support.
iam_group_name_support = "support"
- # The name to be used for the IAM Group that grants IAM Users the permissions to
- # use existing IAM Roles when launching AWS Resources. This does NOT grant the
- # permission to create new IAM Roles.
+ # The name to be used for the IAM Group that grants IAM Users the permissions
+ # to use existing IAM Roles when launching AWS Resources. This does NOT grant
+ # the permission to create new IAM Roles.
iam_group_name_use_existing_iam_roles = "use-existing-iam-roles"
- # The list of names to be used for the IAM Group that enables its members to SSH
- # as a sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # The list of names to be used for the IAM Group that enables its members to
+ # SSH as a sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_sudo_users = ["ssh-grunt-sudo-users"]
# The name to be used for the IAM Group that enables its members to SSH as a
- # non-sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # non-sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_users = ["ssh-grunt-users"]
- # This variable is used to create groups that allow IAM users to assume roles in
- # your other AWS accounts. It should be a list of objects, where each object has
- # the fields 'group_name', which will be used as the name of the IAM group, and
- # 'iam_role_arns', which is a list of ARNs of IAM Roles that you can assume when
- # part of that group. For each entry in the list of objects, we will create an IAM
- # group that allows users to assume the given IAM role(s) in the other AWS
- # account. This allows you to define all your IAM users in one account (e.g. the
- # users account) and to grant them access to certain IAM roles in other accounts
- # (e.g. the stage, prod, audit accounts).
+ # This variable is used to create groups that allow IAM users to assume roles
+ # in your other AWS accounts. It should be a list of objects, where each
+ # object has the fields 'group_name', which will be used as the name of the
+ # IAM group, and 'iam_role_arns', which is a list of ARNs of IAM Roles that
+ # you can assume when part of that group. For each entry in the list of
+ # objects, we will create an IAM group that allows users to assume the given
+ # IAM role(s) in the other AWS account. This allows you to define all your IAM
+ # users in one account (e.g. the users account) and to grant them access to
+ # certain IAM roles in other accounts (e.g. the stage, prod, audit accounts).
iam_groups_for_cross_account_access = []
# Allow users to change their own password.
@@ -727,57 +737,59 @@ module "account_baseline_security" {
# Require at least one uppercase character in password.
iam_password_policy_require_uppercase_characters = true
- # The name to be used for the IAM Policy that grants IAM Users the permissions to
- # manage their own IAM User account.
+ # The name to be used for the IAM Policy that grants IAM Users the permissions
+ # to manage their own IAM User account.
iam_policy_iam_user_self_mgmt = "iam-user-self-mgmt"
# The tags to apply to all the IAM role resources.
iam_role_tags = {}
- # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '443,1020-1025'.
+ # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '443,1020-1025'.
insecure_sg_rules_authorized_tcp_ports = "443"
- # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '500,1020-1025'.
+ # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '500,1020-1025'.
insecure_sg_rules_authorized_udp_ports = null
- # A map of tags to apply to all KMS Keys to be created. In this map variable, the
- # key is the tag name and the value is the tag value.
+ # A map of tags to apply to all KMS Keys to be created. In this map variable,
+ # the key is the tag name and the value is the tag value.
kms_cmk_global_tags = {}
# You can use this variable to create account-level KMS Customer Master Keys
- # (CMKs) for encrypting and decrypting data. This variable should be a map where
- # the keys are the names of the CMK and the values are an object that defines the
- # configuration for that CMK. See the comment below for the configuration options
- # you can set for each key.
+ # (CMKs) for encrypting and decrypting data. This variable should be a map
+ # where the keys are the names of the CMK and the values are an object that
+ # defines the configuration for that CMK. See the comment below for the
+ # configuration options you can set for each key.
kms_customer_master_keys = {}
# The map of names of KMS grants to the region where the key resides in. There
- # should be a one to one mapping between entries in this map and the entries of
- # the kms_grants map. This is used to workaround a terraform limitation where the
- # for_each value can not depend on resources.
+ # should be a one to one mapping between entries in this map and the entries
+ # of the kms_grants map. This is used to workaround a terraform limitation
+ # where the for_each value can not depend on resources.
kms_grant_regions = {}
# Create the specified KMS grants to allow entities to use the KMS key without
- # modifying the KMS policy or IAM. This is necessary to allow AWS services (e.g.
- # ASG) to use CMKs encrypt and decrypt resources. The input is a map of grant name
- # to grant properties. The name must be unique per account.
+ # modifying the KMS policy or IAM. This is necessary to allow AWS services
+ # (e.g. ASG) to use CMKs encrypt and decrypt resources. The input is a map of
+ # grant name to grant properties. The name must be unique per account.
kms_grants = {}
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to all
- # IAM roles created by this module that are intended for people to use, such as
- # allow-read-only-access-from-other-accounts. For IAM roles that are intended for
- # machine users, such as allow-auto-deploy-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for people
+ # to use, such as allow-read-only-access-from-other-accounts. For IAM roles
+ # that are intended for machine users, such as
+ # allow-auto-deploy-from-other-accounts, see
# var.max_session_duration_machine_users.
max_session_duration_human_users = 43200
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to
- # all IAM roles created by this module that are intended for machine users, such
- # as allow-auto-deploy-from-other-accounts. For IAM roles that are intended for
- # human users, such as allow-read-only-access-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for
+ # machine users, such as allow-auto-deploy-from-other-accounts. For IAM roles
+ # that are intended for human users, such as
+ # allow-read-only-access-from-other-accounts, see
# var.max_session_duration_human_users.
max_session_duration_machine_users = 3600
@@ -790,35 +802,36 @@ module "account_baseline_security" {
rds_storage_encrypted_kms_id = null
# Create service-linked roles for this set of services. You should pass in the
- # URLs of the services, but without the protocol (e.g., http://) in front: e.g.,
- # use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or es.amazonaws.com for
- # Amazon Elasticsearch. Service-linked roles are predefined by the service, can
- # typically only be assumed by that service, and include all the permissions that
- # the service requires to call other AWS services on your behalf. You can
- # typically only create one such role per AWS account, which is why this parameter
- # exists in the account baseline. See
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-wor
- # -with-iam.html for the list of services that support service-linked roles.
+ # URLs of the services, but without the protocol (e.g., http://) in front:
+ # e.g., use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or
+ # es.amazonaws.com for Amazon Elasticsearch. Service-linked roles are
+ # predefined by the service, can typically only be assumed by that service,
+ # and include all the permissions that the service requires to call other AWS
+ # services on your behalf. You can typically only create one such role per AWS
+ # account, which is why this parameter exists in the account baseline. See
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html
+ # for the list of services that support service-linked roles.
service_linked_roles = []
- # Should we create the IAM Group for auto-deploy? Allows automated deployment by
- # granting the permissions specified in var.auto_deploy_permissions. (true or
- # false)
+ # Should we create the IAM Group for auto-deploy? Allows automated deployment
+ # by granting the permissions specified in var.auto_deploy_permissions. (true
+ # or false)
should_create_iam_group_auto_deploy = false
- # Should we create the IAM Group for billing? Allows read-write access to billing
- # features only. (true or false)
+ # Should we create the IAM Group for billing? Allows read-write access to
+ # billing features only. (true or false)
should_create_iam_group_billing = false
# Should we create the IAM Group for access to all external AWS accounts?
should_create_iam_group_cross_account_access_all = true
- # Should we create the IAM Group for developers? The permissions of that group are
- # specified via var.iam_group_developers_permitted_services. (true or false)
+ # Should we create the IAM Group for developers? The permissions of that group
+ # are specified via var.iam_group_developers_permitted_services. (true or
+ # false)
should_create_iam_group_developers = false
- # Should we create the IAM Group for full access? Allows full access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for full access? Allows full access to all
+ # AWS resources. (true or false)
should_create_iam_group_full_access = true
# Should we create the IAM Group for IAM administrator access? Allows users to
@@ -826,49 +839,49 @@ module "account_baseline_security" {
# false)
should_create_iam_group_iam_admin = false
- # Should we create the IAM Group for logs? Allows read access to CloudTrail, AWS
- # Config, and CloudWatch. If var.cloudtrail_kms_key_arn is set, will also give
- # decrypt access to a KMS CMK. (true or false)
+ # Should we create the IAM Group for logs? Allows read access to CloudTrail,
+ # AWS Config, and CloudWatch. If var.cloudtrail_kms_key_arn is set, will also
+ # give decrypt access to a KMS CMK. (true or false)
should_create_iam_group_logs = false
- # Should we create the IAM Group for read-only? Allows read-only access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for read-only? Allows read-only access to all
+ # AWS resources. (true or false)
should_create_iam_group_read_only = false
# Should we create the IAM Group for support? Allows support access
# (AWSupportAccess). (true or false)
should_create_iam_group_support = false
- # Should we create the IAM Group for use-existing-iam-roles? Allow launching AWS
- # resources with existing IAM Roles, but no ability to create new IAM Roles. (true
- # or false)
+ # Should we create the IAM Group for use-existing-iam-roles? Allow launching
+ # AWS resources with existing IAM Roles, but no ability to create new IAM
+ # Roles. (true or false)
should_create_iam_group_use_existing_iam_roles = false
- # Should we create the IAM Group for user self-management? Allows users to manage
- # their own IAM user accounts, but not other IAM users. (true or false)
+ # Should we create the IAM Group for user self-management? Allows users to
+ # manage their own IAM user accounts, but not other IAM users. (true or false)
should_create_iam_group_user_self_mgmt = true
- # Should we require that all IAM Users use Multi-Factor Authentication for both
- # AWS API calls and the AWS Web Console? (true or false)
+ # Should we require that all IAM Users use Multi-Factor Authentication for
+ # both AWS API calls and the AWS Web Console? (true or false)
should_require_mfa = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# A map of users to create. The keys are the user names and the values are an
# object with the optional keys 'groups' (a list of IAM groups to add the user
- # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a base-64
- # encoded PGP public key, or a keybase username in the form keybase:username, used
- # to encrypt the user's credentials; required if create_login_profile or
- # create_access_keys is true), 'create_login_profile' (if set to true, create a
- # password to login to the AWS Web Console), 'create_access_keys' (if set to true,
- # create access keys for the user), 'path' (the path), and 'permissions_boundary'
- # (the ARN of the policy that is used to set the permissions boundary for the
- # user).
+ # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a
+ # base-64 encoded PGP public key, or a keybase username in the form
+ # keybase:username, used to encrypt the user's credentials; required if
+ # create_login_profile or create_access_keys is true), 'create_login_profile'
+ # (if set to true, create a password to login to the AWS Web Console),
+ # 'create_access_keys' (if set to true, create access keys for the user),
+ # 'path' (the path), and 'permissions_boundary' (the ARN of the policy that is
+ # used to set the permissions boundary for the user).
users = {}
}
@@ -886,7 +899,7 @@ module "account_baseline_security" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-security?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/account-baseline-security?ref=v0.104.12"
}
inputs = {
@@ -903,85 +916,87 @@ inputs = {
# GuardDuty.
aws_region =
- # Creates resources in the specified regions. The best practice is to enable AWS
- # Config in all enabled regions in your AWS account. This variable must NOT be set
- # to null or empty. Otherwise, we won't know which regions to use and authenticate
- # to, and may use some not enabled in your AWS account (e.g., GovCloud, China,
- # etc). To get the list of regions enabled in your AWS account, you can use the
- # AWS CLI: aws ec2 describe-regions.
+ # Creates resources in the specified regions. The best practice is to enable
+ # AWS Config in all enabled regions in your AWS account. This variable must
+ # NOT be set to null or empty. Otherwise, we won't know which regions to use
+ # and authenticate to, and may use some not enabled in your AWS account (e.g.,
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions.
config_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable EBS
- # Encryption in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # EBS Encryption in all enabled regions in your AWS account. This variable
+ # must NOT be set to null or empty. Otherwise, we won't know which regions to
+ # use and authenticate to, and may use some not enabled in your AWS account
+ # (e.g., GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
ebs_opt_in_regions =
# Creates resources in the specified regions. The best practice is to enable
- # GuardDuty in all enabled regions in your AWS account. This variable must NOT be
- # set to null or empty. Otherwise, we won't know which regions to use and
+ # GuardDuty in all enabled regions in your AWS account. This variable must NOT
+ # be set to null or empty. Otherwise, we won't know which regions to use and
# authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
guardduty_opt_in_regions =
- # Creates resources in the specified regions. The best practice is to enable IAM
- # Access Analyzer in all enabled regions in your AWS account. This variable must
- # NOT be set to null or empty. Otherwise, we won't know which regions to use and
- # authenticate to, and may use some not enabled in your AWS account (e.g.,
- # GovCloud, China, etc). To get the list of regions enabled in your AWS account,
- # you can use the AWS CLI: aws ec2 describe-regions. The value provided for
- # global_recorder_region must be in this list.
+ # Creates resources in the specified regions. The best practice is to enable
+ # IAM Access Analyzer in all enabled regions in your AWS account. This
+ # variable must NOT be set to null or empty. Otherwise, we won't know which
+ # regions to use and authenticate to, and may use some not enabled in your AWS
+ # account (e.g., GovCloud, China, etc). To get the list of regions enabled in
+ # your AWS account, you can use the AWS CLI: aws ec2 describe-regions. The
+ # value provided for global_recorder_region must be in this list.
iam_access_analyzer_opt_in_regions =
# Creates resources in the specified regions. This variable must NOT be set to
- # null or empty. Otherwise, we won't know which regions to use and authenticate
- # to, and may use some not enabled in your AWS account (e.g., GovCloud, China,
- # etc). To get the list of regions enabled in your AWS account, you can use the
- # AWS CLI: aws ec2 describe-regions. The value provided for global_recorder_region
- # must be in this list.
+ # null or empty. Otherwise, we won't know which regions to use and
+ # authenticate to, and may use some not enabled in your AWS account (e.g.,
+ # GovCloud, China, etc). To get the list of regions enabled in your AWS
+ # account, you can use the AWS CLI: aws ec2 describe-regions. The value
+ # provided for global_recorder_region must be in this list.
kms_cmk_opt_in_regions =
- # The name used to prefix AWS Config and Cloudtrail resources, including the S3
- # bucket names and SNS topics used for each.
+ # The name used to prefix AWS Config and Cloudtrail resources, including the
+ # S3 bucket names and SNS topics used for each.
name_prefix =
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Map of additional managed rules to add. The key is the name of the rule (e.g.
- # ´acm-certificate-expiration-check´) and the value is an object specifying the
- # rule details
+ # Map of additional managed rules to add. The key is the name of the rule
+ # (e.g. ´acm-certificate-expiration-check´) and the value is an object
+ # specifying the rule details
additional_config_rules = {}
- # Map of github repositories to the list of branches that are allowed to assume
- # the IAM role. The repository should be encoded as org/repo-name (e.g.,
- # gruntwork-io/terrraform-aws-ci). Allows GitHub Actions to assume the auto deploy
- # IAM role using an OpenID Connect Provider for the given repositories. Refer to
- # the docs for github-actions-iam-role for more information. Note that this is
- # mutually exclusive with var.allow_auto_deploy_from_other_account_arns. Only used
- # if var.enable_github_actions_access is true.
+ # Map of github repositories to the list of branches that are allowed to
+ # assume the IAM role. The repository should be encoded as org/repo-name
+ # (e.g., gruntwork-io/terrraform-aws-ci). Allows GitHub Actions to assume the
+ # auto deploy IAM role using an OpenID Connect Provider for the given
+ # repositories. Refer to the docs for github-actions-iam-role for more
+ # information. Note that this is mutually exclusive with
+ # var.allow_auto_deploy_from_other_account_arns. Only used if
+ # var.enable_github_actions_access is true.
allow_auto_deploy_from_github_actions_for_sources = {}
- # A list of IAM ARNs from other AWS accounts that will be allowed to assume the
- # auto deploy IAM role that has the permissions in var.auto_deploy_permissions.
+ # A list of IAM ARNs from other AWS accounts that will be allowed to assume
+ # the auto deploy IAM role that has the permissions in
+ # var.auto_deploy_permissions.
allow_auto_deploy_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_auto_deploy_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the billing info for this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the billing info for this account.
allow_billing_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_billing_access_iam_role_permissions_boundary = null
# If true, an IAM Policy that grants access to CloudTrail will be honored. If
@@ -989,156 +1004,160 @@ inputs = {
# CloudTrail and any IAM Policy grants will be ignored. (true or false)
allow_cloudtrail_access_with_iam = true
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to the services in this account specified in
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to the services in this account specified in
# var.dev_permitted_services.
allow_dev_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_dev_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed full (read and
- # write) access to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed full (read
+ # and write) access to this account.
allow_full_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_full_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed access to the
- # logs in CloudTrail, AWS Config, and CloudWatch for this account. Will also be
- # given permissions to decrypt with the KMS CMK that is used to encrypt CloudTrail
- # logs.
+ # A list of IAM ARNs from other AWS accounts that will be allowed access to
+ # the logs in CloudTrail, AWS Config, and CloudWatch for this account. Will
+ # also be given permissions to decrypt with the KMS CMK that is used to
+ # encrypt CloudTrail logs.
allow_logs_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed read-only access
- # to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read-only
+ # access to this account.
allow_read_only_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_read_only_access_iam_role_permissions_boundary = null
- # A list of IAM ARNs from other AWS accounts that will be allowed read access to
- # IAM groups and publish SSH keys. This is used for ssh-grunt.
+ # A list of IAM ARNs from other AWS accounts that will be allowed read access
+ # to IAM groups and publish SSH keys. This is used for ssh-grunt.
allow_ssh_grunt_access_from_other_account_arns = []
- # A list of IAM ARNs from other AWS accounts that will be allowed support access
- # (AWSSupportAccess) to this account.
+ # A list of IAM ARNs from other AWS accounts that will be allowed support
+ # access (AWSSupportAccess) to this account.
allow_support_access_from_other_account_arns = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
allow_support_access_iam_role_permissions_boundary = null
- # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group for
- # doing automated deployments. NOTE: If var.should_create_iam_group_auto_deploy is
- # true, the list must have at least one element (e.g. '*').
+ # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group
+ # for doing automated deployments. NOTE: If
+ # var.should_create_iam_group_auto_deploy is true, the list must have at least
+ # one element (e.g. '*').
auto_deploy_permissions = []
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
aws_config_iam_role_permissions_boundary = null
# Whether or not to allow kms:DescribeKey to external AWS accounts with write
- # access to the CloudTrail bucket. This is useful during deployment so that you
- # don't have to pass around the KMS key ARN.
+ # access to the CloudTrail bucket. This is useful during deployment so that
+ # you don't have to pass around the KMS key ARN.
cloudtrail_allow_kms_describe_key_to_external_aws_accounts = false
- # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs to.
- # This log group exists in the current account. Set this value to `null` to avoid
- # publishing the trail logs to the logs group. The recommended configuration for
- # CloudTrail is (a) for each child account to aggregate its logs in an S3 bucket
- # in a single central account, such as a logs account and (b) to also store 14
- # days work of logs in CloudWatch in the child account itself for local debugging.
+ # Specify the name of the CloudWatch Logs group to publish the CloudTrail logs
+ # to. This log group exists in the current account. Set this value to `null`
+ # to avoid publishing the trail logs to the logs group. The recommended
+ # configuration for CloudTrail is (a) for each child account to aggregate its
+ # logs in an S3 bucket in a single central account, such as a logs account and
+ # (b) to also store 14 days' worth of logs in CloudWatch in the child account
+ # itself for local debugging.
cloudtrail_cloudwatch_logs_group_name = "cloudtrail-logs"
# If true, logging of data events will be enabled.
cloudtrail_data_logging_enabled = false
- # Specify if you want your event selector to include management events for your
- # trail.
+ # Specify if you want your event selector to include management events for
+ # your trail.
cloudtrail_data_logging_include_management_events = true
- # Specify if you want your trail to log read-only events, write-only events, or
- # all. Possible values are: ReadOnly, WriteOnly, All.
+ # Specify if you want your trail to log read-only events, write-only events,
+ # or all. Possible values are: ReadOnly, WriteOnly, All.
cloudtrail_data_logging_read_write_type = "All"
- # Data resources for which to log data events. This should be a map, where each
- # key is a data resource type, and each value is a list of data resource values.
- # Possible values for data resource types are: AWS::S3::Object,
- # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource' block
- # within the 'event_selector' block of the 'aws_cloudtrail' resource for context:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # trail#data_resource.
+ # Data resources for which to log data events. This should be a map, where
+ # each key is a data resource type, and each value is a list of data resource
+ # values. Possible values for data resource types are: AWS::S3::Object,
+ # AWS::Lambda::Function and AWS::DynamoDB::Table. See the 'data_resource'
+ # block within the 'event_selector' block of the 'aws_cloudtrail' resource for
+ # context:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#data_resource.
cloudtrail_data_logging_resources = {}
- # A list of external AWS accounts that should be given write access for CloudTrail
- # logs to this S3 bucket. This is useful when aggregating CloudTrail logs for
- # multiple AWS accounts in one common S3 bucket.
+ # A list of external AWS accounts that should be given write access for
+ # CloudTrail logs to this S3 bucket. This is useful when aggregating
+ # CloudTrail logs for multiple AWS accounts in one common S3 bucket.
cloudtrail_external_aws_account_ids_with_write_access = []
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
cloudtrail_force_destroy = false
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role
cloudtrail_iam_role_permissions_boundary = null
# All CloudTrail Logs will be encrypted with a KMS Key (a Customer Master Key)
- # that governs access to write API calls older than 7 days and all read API calls.
- # The IAM Users specified in this list will have rights to change who can access
- # this extended log data.
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. The IAM Users specified in this list will have rights to change who
+ # can access this extended log data.
cloudtrail_kms_key_administrator_iam_arns = []
- # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key) that
- # governs access to write API calls older than 7 days and all read API calls. If
- # that CMK already exists, set this to the ARN of that CMK. Otherwise, set this to
- # null, and a new CMK will be created. We recommend setting this to the ARN of a
- # CMK that already exists in a separate logs account.
+ # All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. If that CMK already exists, set this to the ARN of that CMK.
+ # Otherwise, set this to null, and a new CMK will be created. We recommend
+ # setting this to the ARN of a CMK that already exists in a separate logs
+ # account.
cloudtrail_kms_key_arn = null
- # If the kms_key_arn provided is an alias or alias ARN, then this must be set to
- # true so that the module will exchange the alias for a CMK ARN. Setting this to
- # true and using aliases requires
- # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be true
- # for multi-account scenarios.
+ # If the kms_key_arn provided is an alias or alias ARN, then this must be set
+ # to true so that the module will exchange the alias for a CMK ARN. Setting
+ # this to true and using aliases requires
+ # var.cloudtrail_allow_kms_describe_key_to_external_aws_accounts to also be
+ # true for multi-account scenarios.
cloudtrail_kms_key_arn_is_alias = false
- # Additional service principals beyond CloudTrail that should have access to the
- # KMS key used to encrypt the logs. This is useful for granting access to the logs
- # for the purposes of constructing metric filters.
+ # Additional service principals beyond CloudTrail that should have access to
+ # the KMS key used to encrypt the logs. This is useful for granting access to
+ # the logs for the purposes of constructing metric filters.
cloudtrail_kms_key_service_principals = []
# All CloudTrail Logs will be encrypted with a KMS Key (a Customer Master Key)
- # that governs access to write API calls older than 7 days and all read API calls.
- # The IAM Users specified in this list will have read-only access to this extended
- # log data.
+ # that governs access to write API calls older than 7 days and all read API
+ # calls. The IAM Users specified in this list will have read-only access to
+ # this extended log data.
cloudtrail_kms_key_user_iam_arns = []
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
cloudtrail_num_days_after_which_archive_log_data = 30
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
cloudtrail_num_days_after_which_delete_log_data = 365
- # After this number of days, logs stored in CloudWatch will be deleted. Possible
- # values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827,
- # 3653, and 0 (default). When set to 0, logs will be retained indefinitely.
+ # After this number of days, logs stored in CloudWatch will be deleted.
+ # Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400,
+ # 545, 731, 1827, 3653, and 0 (default). When set to 0, logs will be retained
+ # indefinitely.
cloudtrail_num_days_to_retain_cloudwatch_logs = 0
# Set to false to create an S3 bucket of name var.cloudtrail_s3_bucket_name in
# this account for storing CloudTrail logs. Set to true to assume the bucket
# specified in var.cloudtrail_s3_bucket_name already exists in another AWS
# account. We recommend setting this to true and setting
- # var.cloudtrail_s3_bucket_name to the name of a bucket that already exists in a
- # separate logs account.
+ # var.cloudtrail_s3_bucket_name to the name of a bucket that already exists in
+ # a separate logs account.
cloudtrail_s3_bucket_already_exists = false
# The name of the S3 Bucket where CloudTrail logs will be stored. If value is
@@ -1146,148 +1165,151 @@ inputs = {
cloudtrail_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage Cloudtrail data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store CloudTrail data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README from
+ # the terraform-aws-security/private-s3-bucket module.
cloudtrail_s3_mfa_delete = false
# Tags to apply to the CloudTrail resources.
cloudtrail_tags = {}
# Set to true to send the AWS Config data to another account (e.g., a logs
- # account) for aggregation purposes. You must set the ID of that other account via
- # the config_central_account_id variable. This redundant variable has to exist
- # because Terraform does not allow computed data in count and for_each parameters
- # and var.config_central_account_id may be computed if its the ID of a
- # newly-created AWS account.
+ # account) for aggregation purposes. You must set the ID of that other account
+ # via the config_central_account_id variable. This redundant variable has to
+ # exist because Terraform does not allow computed data in count and for_each
+ # parameters and var.config_central_account_id may be computed if it's the ID
+ # of a newly-created AWS account.
config_aggregate_config_data_in_external_account = false
# If the S3 bucket and SNS topics used for AWS Config live in a different AWS
- # account, set this variable to the ID of that account. If the S3 bucket and SNS
- # topics live in this account, set this variable to null. We recommend setting
- # this to the ID of a separate logs account. Only used if
+ # account, set this variable to the ID of that account. If the S3 bucket and
+ # SNS topics live in this account, set this variable to null. We recommend
+ # setting this to the ID of a separate logs account. Only used if
# var.config_aggregate_config_data_in_external_account is true.
config_central_account_id = null
- # Set to true to create AWS Config rules directly in this account. Set false to
- # not create any Config rules in this account (i.e., if you created the rules at
- # the organization level already). We recommend setting this to true to use
- # account-level rules because org-level rules create a chicken-and-egg problem
- # with creating new accounts.
+ # Set to true to create AWS Config rules directly in this account. Set false
+ # to not create any Config rules in this account (i.e., if you created the
+ # rules at the organization level already). We recommend setting this to true
+ # to use account-level rules because org-level rules create a chicken-and-egg
+ # problem with creating new accounts.
config_create_account_rules = true
# Optional KMS key to use for encrypting S3 objects on the AWS Config delivery
- # channel for an externally managed S3 bucket. This must belong to the same region
- # as the destination S3 bucket. If null, AWS Config will default to encrypting the
- # delivered data with AES-256 encryption. Only used if var.should_create_s3_bucket
- # is false - otherwise, var.kms_key_arn is used.
+ # channel for an externally managed S3 bucket. This must belong to the same
+ # region as the destination S3 bucket. If null, AWS Config will default to
+ # encrypting the delivered data with AES-256 encryption. Only used if
+ # var.should_create_s3_bucket is false - otherwise, var.kms_key_arn is used.
config_delivery_channel_kms_key_arn = null
- # Same as var.config_delivery_channel_kms_key_arn, except the value is a name of a
- # KMS key configured with var.kms_customer_master_keys. The module created KMS key
- # for the delivery region (indexed by the name) will be used. Note that if both
- # var.config_delivery_channel_kms_key_arn and
+ # Same as var.config_delivery_channel_kms_key_arn, except the value is a name
+ # of a KMS key configured with var.kms_customer_master_keys. The module
+ # created KMS key for the delivery region (indexed by the name) will be used.
+ # Note that if both var.config_delivery_channel_kms_key_arn and
# var.config_delivery_channel_kms_key_by_name are configured, the key in
# var.config_delivery_channel_kms_key_arn will always be used.
config_delivery_channel_kms_key_by_name = null
- # If set to true, when you run 'terraform destroy', delete all objects from the
- # bucket so that the bucket can be destroyed without error. Warning: these objects
- # are not recoverable so only use this if you're absolutely sure you want to
- # permanently delete everything!
+ # If set to true, when you run 'terraform destroy', delete all objects from
+ # the bucket so that the bucket can be destroyed without error. Warning: these
+ # objects are not recoverable so only use this if you're absolutely sure you
+ # want to permanently delete everything!
config_force_destroy = false
- # Provide a list of AWS account IDs that will send Config data to this account.
- # This is useful if your aggregating config data in this account for other
- # accounts.
+ # Provide a list of AWS account IDs that will send Config data to this
+ # account. This is useful if you're aggregating config data in this account
+ # other accounts.
config_linked_accounts = []
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
config_num_days_after_which_archive_log_data = 365
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Optional KMS key to use for encrypting S3 objects on the AWS Config bucket, when
- # the S3 bucket is created within this module (var.config_should_create_s3_bucket
- # is true). For encrypting S3 objects on delivery for an externally managed S3
- # bucket, refer to the var.config_delivery_channel_kms_key_arn input variable. If
- # null, data in S3 will be encrypted using the default aws/s3 key. If provided,
- # the key policy of the provided key must permit the IAM role used by AWS Config.
- # See https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that
+ # Optional KMS key to use for encrypting S3 objects on the AWS Config bucket,
+ # when the S3 bucket is created within this module
+ # (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
+ # delivery for an externally managed S3 bucket, refer to the
+ # var.config_delivery_channel_kms_key_arn input variable. If null, data in S3
+ # will be encrypted using the default aws/s3 key. If provided, the key policy
+ # of the provided key must permit the IAM role used by AWS Config. See
+ # https://docs.aws.amazon.com/sns/latest/dg/sns-key-management.html. Note that
# the KMS key must reside in the global recorder region (as configured by
# var.aws_region).
config_s3_bucket_kms_key_arn = null
- # Same as var.config_s3_bucket_kms_key_arn, except the value is a name of a KMS
- # key configured with var.kms_customer_master_keys. The module created KMS key for
- # the global recorder region (indexed by the name) will be used. Note that if both
- # var.config_s3_bucket_kms_key_arn and var.config_s3_bucket_kms_key_by_name are
- # configured, the key in var.config_s3_bucket_kms_key_arn will always be used.
+ # Same as var.config_s3_bucket_kms_key_arn, except the value is a name of a
+ # KMS key configured with var.kms_customer_master_keys. The module created KMS
+ # key for the global recorder region (indexed by the name) will be used. Note
+ # that if both var.config_s3_bucket_kms_key_arn and
+ # var.config_s3_bucket_kms_key_by_name are configured, the key in
+ # var.config_s3_bucket_kms_key_arn will always be used.
config_s3_bucket_kms_key_by_name = null
- # The name of the S3 Bucket where CloudTrail logs will be stored. This could be a
- # bucket in this AWS account or the name of a bucket in another AWS account where
- # logs should be sent. We recommend setting this to the name of a bucket in a
- # separate logs account.
+ # The name of the S3 Bucket where CloudTrail logs will be stored. This could
+ # be a bucket in this AWS account or the name of a bucket in another AWS
+ # account where logs should be sent. We recommend setting this to the name of
+ # a bucket in a separate logs account.
config_s3_bucket_name = null
# Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the bucket
- # used to storage AWS Config data. This cannot be used to toggle this setting but
- # is available to allow managed buckets to reflect the state in AWS. For
- # instructions on how to enable MFA Delete, check out the README from the
- # terraform-aws-security/private-s3-bucket module.
+ # 'Permanently delete an object version'. This setting only applies to the
+ # bucket used to store AWS Config data. This cannot be used to toggle this
+ # setting but is available to allow managed buckets to reflect the state in
+ # AWS. For instructions on how to enable MFA Delete, check out the README from
+ # the terraform-aws-security/private-s3-bucket module.
config_s3_mfa_delete = false
# Set to true to create an S3 bucket of name var.config_s3_bucket_name in this
- # account for storing AWS Config data. Set to false to assume the bucket specified
- # in var.config_s3_bucket_name already exists in another AWS account. We recommend
- # setting this to false and setting var.config_s3_bucket_name to the name off an
- # S3 bucket that already exists in a separate logs account.
+ # account for storing AWS Config data. Set to false to assume the bucket
+ # specified in var.config_s3_bucket_name already exists in another AWS
+ # account. We recommend setting this to false and setting
+ # var.config_s3_bucket_name to the name of an S3 bucket that already exists
+ # in a separate logs account.
config_should_create_s3_bucket = false
# Set to true to create an SNS topic in this account for sending AWS Config
- # notifications (e.g., if this is the logs account). Set to false to assume the
- # topic specified in var.config_sns_topic_name already exists in another AWS
- # account (e.g., if this is the stage or prod account and
+ # notifications (e.g., if this is the logs account). Set to false to assume
+ # the topic specified in var.config_sns_topic_name already exists in another
+ # AWS account (e.g., if this is the stage or prod account and
# var.config_sns_topic_name is the name of an SNS topic in the logs account).
config_should_create_sns_topic = false
- # Same as var.config_sns_topic_kms_key_region_map, except the value is a name of a
- # KMS key configured with var.kms_customer_master_keys. The module created KMS key
- # for each region (indexed by the name) will be used. Note that if an entry exists
- # for a region in both var.config_sns_topic_kms_key_region_map and
+ # Same as var.config_sns_topic_kms_key_region_map, except the value is a name
+ # of a KMS key configured with var.kms_customer_master_keys. The module
+ # created KMS key for each region (indexed by the name) will be used. Note
+ # that if an entry exists for a region in both
+ # var.config_sns_topic_kms_key_region_map and
# var.config_sns_topic_kms_key_by_name_region_map, then the key in
# var.config_sns_topic_kms_key_region_map will always be used.
config_sns_topic_kms_key_by_name_region_map = null
- # Optional KMS key to use for each region for configuring default encryption for
- # the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of KMS
- # key). If null or the region key is missing, encryption will not be configured
- # for the SNS topic in that region.
+ # Optional KMS key to use for each region for configuring default encryption
+ # for the SNS topic (encoded as a map from region - e.g. us-east-1 - to ARN of
+ # KMS key). If null or the region key is missing, encryption will not be
+ # configured for the SNS topic in that region.
config_sns_topic_kms_key_region_map = null
- # The name of the SNS Topic in where AWS Config notifications will be sent. Can be
- # in the same account or in another account.
+ # The name of the SNS Topic where AWS Config notifications will be sent.
+ # Can be in the same account or in another account.
config_sns_topic_name = "ConfigTopic"
- # A map of tags to apply to the S3 Bucket. The key is the tag name and the value
- # is the tag value.
+ # A map of tags to apply to the S3 Bucket. The key is the tag name and the
+ # value is the tag value.
config_tags = {}
- # The maximum frequency with which AWS Config runs evaluations for the ´PERIODIC´
- # rules. See
- # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.h
- # ml#maximum_execution_frequency
+ # The maximum frequency with which AWS Config runs evaluations for the
+ # ´PERIODIC´ rules. See
+ # https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.html#maximum_execution_frequency
configrules_maximum_execution_frequency = "TwentyFour_Hours"
- # The name of the IAM group that will grant access to all external AWS accounts in
- # var.iam_groups_for_cross_account_access.
+ # The name of the IAM group that will grant access to all external AWS
+ # accounts in var.iam_groups_for_cross_account_access.
cross_account_access_all_group_name = "_all-accounts"
# A custom name to use for the Cloudtrail Trail. If null, defaults to the
@@ -1295,15 +1317,15 @@ inputs = {
custom_cloudtrail_trail_name = null
# A list of AWS services for which the developers from the accounts in
- # var.allow_dev_access_from_other_account_arns will receive full permissions. See
- # https://goo.gl/ZyoHlz to find the IAM Service name. For example, to grant
- # developers access only to EC2 and Amazon Machine Learning, use the value
- # ["ec2","machinelearning"]. Do NOT add iam to the list of services, or that will
- # grant Developers de facto admin access.
+ # var.allow_dev_access_from_other_account_arns will receive full permissions.
+ # See https://goo.gl/ZyoHlz to find the IAM Service name. For example, to
+ # grant developers access only to EC2 and Amazon Machine Learning, use the
+ # value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
+ # that will grant Developers de facto admin access.
dev_permitted_services = []
- # If set to true (default), all new EBS volumes will have encryption enabled by
- # default
+ # If set to true (default), all new EBS volumes will have encryption enabled
+ # by default
ebs_enable_encryption = true
# The name of the KMS CMK to use by default for encrypting EBS volumes, if
@@ -1312,15 +1334,15 @@ inputs = {
ebs_kms_key_name = ""
# If set to true, the KMS Customer Managed Keys (CMK) with the name in
- # var.ebs_kms_key_name will be set as the default for EBS encryption. When false
- # (default), the AWS-managed aws/ebs key will be used.
+ # var.ebs_kms_key_name will be set as the default for EBS encryption. When
+ # false (default), the AWS-managed aws/ebs key will be used.
ebs_use_existing_kms_keys = false
- # Set to true (default) to enable CloudTrail in the security account. Set to false
- # to disable CloudTrail (note: all other CloudTrail variables will be ignored).
- # Note that if you have enabled organization trail in the root (parent) account,
- # you should set this to false; the organization trail will enable CloudTrail on
- # child accounts by default.
+ # Set to true (default) to enable CloudTrail in the security account. Set to
+ # false to disable CloudTrail (note: all other CloudTrail variables will be
+ # ignored). Note that if you have enabled organization trail in the root
+ # (parent) account, you should set this to false; the organization trail will
+ # enable CloudTrail on child accounts by default.
enable_cloudtrail = true
# Set to true to enable AWS Config in the security account. Set to false to
@@ -1332,15 +1354,15 @@ inputs = {
# When true, create an Open ID Connect Provider that GitHub actions can use to
# assume IAM roles in the account. Refer to
- # https://docs.github.com/en/actions/deployment/security-hardening-your-deployment
- # /configuring-openid-connect-in-amazon-web-services for more information.
+ # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services
+ # for more information.
enable_github_actions_access = false
- # Set to true (default) to enable GuardDuty in this app account. Set to false to
- # disable GuardDuty (note: all other GuardDuty variables will be ignored). Note
- # that if you have enabled organization level GuardDuty in the root (parent)
- # account, you should set this to false; the organization GuardDuty will enable
- # GuardDuty on child accounts by default.
+ # Set to true (default) to enable GuardDuty in this app account. Set to false
+ # to disable GuardDuty (note: all other GuardDuty variables will be ignored).
+ # Note that if you have enabled organization level GuardDuty in the root
+ # (parent) account, you should set this to false; the organization GuardDuty
+ # will enable GuardDuty on child accounts by default.
enable_guardduty = true
# A feature flag to enable or disable this module.
@@ -1356,15 +1378,15 @@ inputs = {
# requirements.
enable_iam_password_policy = true
- # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual Private
- # Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
+ # Checks whether the security group with 0.0.0.0/0 of any Amazon Virtual
+ # Private Cloud (Amazon VPC) allows only specific inbound TCP or UDP traffic.
enable_insecure_sg_rules = true
# Checks whether storage encryption is enabled for your RDS DB instances.
enable_rds_storage_encrypted = true
- # Checks whether users of your AWS account require a multi-factor authentication
- # (MFA) device to sign in with root credentials.
+ # Checks whether users of your AWS account require a multi-factor
+ # authentication (MFA) device to sign in with root credentials.
enable_root_account_mfa = true
# Checks that your Amazon S3 buckets do not allow public read access.
@@ -1378,15 +1400,16 @@ inputs = {
encrypted_volumes_kms_id = null
# When destroying this user, destroy even if it has non-Terraform-managed IAM
- # access keys, login profile, or MFA devices. Without force_destroy a user with
- # non-Terraform-managed access keys and login profile will fail to be destroyed.
+ # access keys, login profile, or MFA devices. Without force_destroy a user
+ # with non-Terraform-managed access keys and login profile will fail to be
+ # destroyed.
force_destroy_users = false
- # When set, use the statically provided hardcoded list of thumbprints rather than
- # looking it up dynamically. This is useful if you want to trade reliability of
- # the OpenID Connect Provider across certificate renewals with a static list that
- # is obtained using a trustworthy mechanism, to mitigate potential damage from a
- # domain hijacking attack on GitHub domains.
+ # When set, use the statically provided hardcoded list of thumbprints rather
+ # than looking it up dynamically. This is useful if you want to trade
+ # reliability of the OpenID Connect Provider across certificate renewals with
+ # a static list that is obtained using a trustworthy mechanism, to mitigate
+ # potential damage from a domain hijacking attack on GitHub domains.
github_actions_openid_connect_provider_thumbprint_list = null
# Name of the Cloudwatch event rules.
@@ -1395,9 +1418,9 @@ inputs = {
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
- # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must be
- # configured in Terraform to enable drift detection. Valid values for standalone
- # and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
+ # defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must
+ # be configured in Terraform to enable drift detection. Valid values for
+ # standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
# Specifies a name for the created SNS topics where findings are published.
@@ -1410,17 +1433,17 @@ inputs = {
# The name of the IAM Access Analyzer module
iam_access_analyzer_name = "baseline_security-iam_access_analyzer"
- # If set to ACCOUNT, the analyzer will only be scanning the current AWS account
- # it's in. If set to ORGANIZATION - will scan the organization AWS account and the
- # child accounts.
+ # If set to ACCOUNT, the analyzer will only be scanning the current AWS
+ # account it's in. If set to ORGANIZATION - will scan the organization AWS
+ # account and the child accounts.
iam_access_analyzer_type = "ACCOUNT"
# A list of AWS services for which the developers IAM Group will receive full
# permissions. See https://goo.gl/ZyoHlz to find the IAM Service name. For
- # example, to grant developers access only to EC2 and Amazon Machine Learning, use
- # the value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
- # that will grant Developers de facto admin access. If you need to grant iam
- # privileges, just grant the user Full Access.
+ # example, to grant developers access only to EC2 and Amazon Machine Learning,
+ # use the value ["ec2","machinelearning"]. Do NOT add iam to the list of
+ # services, or that will grant Developers de facto admin access. If you need
+ # to grant iam privileges, just grant the user Full Access.
iam_group_developers_permitted_services = []
# The name of the IAM Group that allows automated deployment by graning the
@@ -1431,8 +1454,8 @@ inputs = {
# billing features in AWS.
iam_group_name_billing = "billing"
- # The name to be used for the IAM Group that grants IAM Users a reasonable set of
- # permissions for developers.
+ # The name to be used for the IAM Group that grants IAM Users a reasonable set
+ # of permissions for developers.
iam_group_name_developers = "developers"
# The name to be used for the IAM Group that grants full access to all AWS
@@ -1443,49 +1466,49 @@ inputs = {
# Effectively grants administrator access.
iam_group_name_iam_admin = "iam-admin"
- # The name to be used for the IAM Group that grants IAM Users the permissions to
- # manage their own IAM User account.
+ # The name to be used for the IAM Group that grants IAM Users the permissions
+ # to manage their own IAM User account.
iam_group_name_iam_user_self_mgmt = "iam-user-self-mgmt"
- # The name to be used for the IAM Group that grants read access to CloudTrail, AWS
- # Config, and CloudWatch in AWS.
+ # The name to be used for the IAM Group that grants read access to CloudTrail,
+ # AWS Config, and CloudWatch in AWS.
iam_group_name_logs = "logs"
- # The name to be used for the IAM Group that grants read-only access to all AWS
- # resources.
+ # The name to be used for the IAM Group that grants read-only access to all
+ # AWS resources.
iam_group_name_read_only = "read-only"
# The name of the IAM Group that allows access to AWS Support.
iam_group_name_support = "support"
- # The name to be used for the IAM Group that grants IAM Users the permissions to
- # use existing IAM Roles when launching AWS Resources. This does NOT grant the
- # permission to create new IAM Roles.
+ # The name to be used for the IAM Group that grants IAM Users the permissions
+ # to use existing IAM Roles when launching AWS Resources. This does NOT grant
+ # the permission to create new IAM Roles.
iam_group_name_use_existing_iam_roles = "use-existing-iam-roles"
- # The list of names to be used for the IAM Group that enables its members to SSH
- # as a sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # The list of names to be used for the IAM Group that enables its members to
+ # SSH as a sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_sudo_users = ["ssh-grunt-sudo-users"]
# The name to be used for the IAM Group that enables its members to SSH as a
- # non-sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # non-sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_users = ["ssh-grunt-users"]
- # This variable is used to create groups that allow IAM users to assume roles in
- # your other AWS accounts. It should be a list of objects, where each object has
- # the fields 'group_name', which will be used as the name of the IAM group, and
- # 'iam_role_arns', which is a list of ARNs of IAM Roles that you can assume when
- # part of that group. For each entry in the list of objects, we will create an IAM
- # group that allows users to assume the given IAM role(s) in the other AWS
- # account. This allows you to define all your IAM users in one account (e.g. the
- # users account) and to grant them access to certain IAM roles in other accounts
- # (e.g. the stage, prod, audit accounts).
+ # This variable is used to create groups that allow IAM users to assume roles
+ # in your other AWS accounts. It should be a list of objects, where each
+ # object has the fields 'group_name', which will be used as the name of the
+ # IAM group, and 'iam_role_arns', which is a list of ARNs of IAM Roles that
+ # you can assume when part of that group. For each entry in the list of
+ # objects, we will create an IAM group that allows users to assume the given
+ # IAM role(s) in the other AWS account. This allows you to define all your IAM
+ # users in one account (e.g. the users account) and to grant them access to
+ # certain IAM roles in other accounts (e.g. the stage, prod, audit accounts).
iam_groups_for_cross_account_access = []
# Allow users to change their own password.
@@ -1515,57 +1538,59 @@ inputs = {
# Require at least one uppercase character in password.
iam_password_policy_require_uppercase_characters = true
- # The name to be used for the IAM Policy that grants IAM Users the permissions to
- # manage their own IAM User account.
+ # The name to be used for the IAM Policy that grants IAM Users the permissions
+ # to manage their own IAM User account.
iam_policy_iam_user_self_mgmt = "iam-user-self-mgmt"
# The tags to apply to all the IAM role resources.
iam_role_tags = {}
- # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '443,1020-1025'.
+ # Comma-separated list of TCP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '443,1020-1025'.
insecure_sg_rules_authorized_tcp_ports = "443"
- # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges are
- # defined by a dash; for example, '500,1020-1025'.
+ # Comma-separated list of UDP ports authorized to be open to 0.0.0.0/0. Ranges
+ # are defined by a dash; for example, '500,1020-1025'.
insecure_sg_rules_authorized_udp_ports = null
- # A map of tags to apply to all KMS Keys to be created. In this map variable, the
- # key is the tag name and the value is the tag value.
+ # A map of tags to apply to all KMS Keys to be created. In this map variable,
+ # the key is the tag name and the value is the tag value.
kms_cmk_global_tags = {}
# You can use this variable to create account-level KMS Customer Master Keys
- # (CMKs) for encrypting and decrypting data. This variable should be a map where
- # the keys are the names of the CMK and the values are an object that defines the
- # configuration for that CMK. See the comment below for the configuration options
- # you can set for each key.
+ # (CMKs) for encrypting and decrypting data. This variable should be a map
+ # where the keys are the names of the CMK and the values are an object that
+ # defines the configuration for that CMK. See the comment below for the
+ # configuration options you can set for each key.
kms_customer_master_keys = {}
# The map of names of KMS grants to the region where the key resides in. There
- # should be a one to one mapping between entries in this map and the entries of
- # the kms_grants map. This is used to workaround a terraform limitation where the
- # for_each value can not depend on resources.
+ # should be a one to one mapping between entries in this map and the entries
+ # of the kms_grants map. This is used to work around a terraform limitation
+ # where the for_each value can not depend on resources.
kms_grant_regions = {}
# Create the specified KMS grants to allow entities to use the KMS key without
- # modifying the KMS policy or IAM. This is necessary to allow AWS services (e.g.
- # ASG) to use CMKs encrypt and decrypt resources. The input is a map of grant name
- # to grant properties. The name must be unique per account.
+ # modifying the KMS policy or IAM. This is necessary to allow AWS services
+ # (e.g. ASG) to use CMKs to encrypt and decrypt resources. The input is a map
+ # grant name to grant properties. The name must be unique per account.
kms_grants = {}
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to all
- # IAM roles created by this module that are intended for people to use, such as
- # allow-read-only-access-from-other-accounts. For IAM roles that are intended for
- # machine users, such as allow-auto-deploy-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for people
+ # to use, such as allow-read-only-access-from-other-accounts. For IAM roles
+ # that are intended for machine users, such as
+ # allow-auto-deploy-from-other-accounts, see
# var.max_session_duration_machine_users.
max_session_duration_human_users = 43200
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to
- # all IAM roles created by this module that are intended for machine users, such
- # as allow-auto-deploy-from-other-accounts. For IAM roles that are intended for
- # human users, such as allow-read-only-access-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for
+ # machine users, such as allow-auto-deploy-from-other-accounts. For IAM roles
+ # that are intended for human users, such as
+ # allow-read-only-access-from-other-accounts, see
# var.max_session_duration_human_users.
max_session_duration_machine_users = 3600
@@ -1578,35 +1603,36 @@ inputs = {
rds_storage_encrypted_kms_id = null
# Create service-linked roles for this set of services. You should pass in the
- # URLs of the services, but without the protocol (e.g., http://) in front: e.g.,
- # use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or es.amazonaws.com for
- # Amazon Elasticsearch. Service-linked roles are predefined by the service, can
- # typically only be assumed by that service, and include all the permissions that
- # the service requires to call other AWS services on your behalf. You can
- # typically only create one such role per AWS account, which is why this parameter
- # exists in the account baseline. See
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-wor
- # -with-iam.html for the list of services that support service-linked roles.
+ # URLs of the services, but without the protocol (e.g., http://) in front:
+ # e.g., use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or
+ # es.amazonaws.com for Amazon Elasticsearch. Service-linked roles are
+ # predefined by the service, can typically only be assumed by that service,
+ # and include all the permissions that the service requires to call other AWS
+ # services on your behalf. You can typically only create one such role per AWS
+ # account, which is why this parameter exists in the account baseline. See
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html
+ # for the list of services that support service-linked roles.
service_linked_roles = []
- # Should we create the IAM Group for auto-deploy? Allows automated deployment by
- # granting the permissions specified in var.auto_deploy_permissions. (true or
- # false)
+ # Should we create the IAM Group for auto-deploy? Allows automated deployment
+ # by granting the permissions specified in var.auto_deploy_permissions. (true
+ # or false)
should_create_iam_group_auto_deploy = false
- # Should we create the IAM Group for billing? Allows read-write access to billing
- # features only. (true or false)
+ # Should we create the IAM Group for billing? Allows read-write access to
+ # billing features only. (true or false)
should_create_iam_group_billing = false
# Should we create the IAM Group for access to all external AWS accounts?
should_create_iam_group_cross_account_access_all = true
- # Should we create the IAM Group for developers? The permissions of that group are
- # specified via var.iam_group_developers_permitted_services. (true or false)
+ # Should we create the IAM Group for developers? The permissions of that group
+ # are specified via var.iam_group_developers_permitted_services. (true or
+ # false)
should_create_iam_group_developers = false
- # Should we create the IAM Group for full access? Allows full access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for full access? Allows full access to all
+ # AWS resources. (true or false)
should_create_iam_group_full_access = true
# Should we create the IAM Group for IAM administrator access? Allows users to
@@ -1614,49 +1640,49 @@ inputs = {
# false)
should_create_iam_group_iam_admin = false
- # Should we create the IAM Group for logs? Allows read access to CloudTrail, AWS
- # Config, and CloudWatch. If var.cloudtrail_kms_key_arn is set, will also give
- # decrypt access to a KMS CMK. (true or false)
+ # Should we create the IAM Group for logs? Allows read access to CloudTrail,
+ # AWS Config, and CloudWatch. If var.cloudtrail_kms_key_arn is set, will also
+ # give decrypt access to a KMS CMK. (true or false)
should_create_iam_group_logs = false
- # Should we create the IAM Group for read-only? Allows read-only access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for read-only? Allows read-only access to all
+ # AWS resources. (true or false)
should_create_iam_group_read_only = false
# Should we create the IAM Group for support? Allows support access
# (AWSupportAccess). (true or false)
should_create_iam_group_support = false
- # Should we create the IAM Group for use-existing-iam-roles? Allow launching AWS
- # resources with existing IAM Roles, but no ability to create new IAM Roles. (true
- # or false)
+ # Should we create the IAM Group for use-existing-iam-roles? Allow launching
+ # AWS resources with existing IAM Roles, but no ability to create new IAM
+ # Roles. (true or false)
should_create_iam_group_use_existing_iam_roles = false
- # Should we create the IAM Group for user self-management? Allows users to manage
- # their own IAM user accounts, but not other IAM users. (true or false)
+ # Should we create the IAM Group for user self-management? Allows users to
+ # manage their own IAM user accounts, but not other IAM users. (true or false)
should_create_iam_group_user_self_mgmt = true
- # Should we require that all IAM Users use Multi-Factor Authentication for both
- # AWS API calls and the AWS Web Console? (true or false)
+ # Should we require that all IAM Users use Multi-Factor Authentication for
+ # both AWS API calls and the AWS Web Console? (true or false)
should_require_mfa = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# A map of users to create. The keys are the user names and the values are an
# object with the optional keys 'groups' (a list of IAM groups to add the user
- # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a base-64
- # encoded PGP public key, or a keybase username in the form keybase:username, used
- # to encrypt the user's credentials; required if create_login_profile or
- # create_access_keys is true), 'create_login_profile' (if set to true, create a
- # password to login to the AWS Web Console), 'create_access_keys' (if set to true,
- # create access keys for the user), 'path' (the path), and 'permissions_boundary'
- # (the ARN of the policy that is used to set the permissions boundary for the
- # user).
+ # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a
+ # base-64 encoded PGP public key, or a keybase username in the form
+ # keybase:username, used to encrypt the user's credentials; required if
+ # create_login_profile or create_access_keys is true), 'create_login_profile'
+ # (if set to true, create a password to login to the AWS Web Console),
+ # 'create_access_keys' (if set to true, create access keys for the user),
+ # 'path' (the path), and 'permissions_boundary' (the ARN of the policy that is
+ # used to set the permissions boundary for the user).
users = {}
}
@@ -3839,11 +3865,11 @@ A map of usernames to that user's AWS Web Console password, encrypted with that
diff --git a/docs/reference/services/landing-zone/gruntwork-access.md b/docs/reference/services/landing-zone/gruntwork-access.md
index c64b543013..4118b3ebae 100644
--- a/docs/reference/services/landing-zone/gruntwork-access.md
+++ b/docs/reference/services/landing-zone/gruntwork-access.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Gruntwork Access
-View Source
+View Source
Release Notes
@@ -63,7 +63,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -71,7 +71,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog, configure CI / CD for your apps and
@@ -91,21 +91,22 @@ If you want to deploy this repo in production, check out the following resources
module "gruntwork_access" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/gruntwork-access?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/gruntwork-access?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
# Set to true to grant your security account, with the account ID specified in
- # var.security_account_id, access to the IAM role. This is required for deploying
- # a Reference Architecture.
+ # var.security_account_id, access to the IAM role. This is required for
+ # deploying a Reference Architecture.
grant_security_account_access =
# The ID of your security account (where IAM users are defined). Required for
# deploying a Reference Architecture, as the Gruntwork team deploys an EC2
- # instance in the security account, and that instance assumes this IAM role to get
- # access to all the other child accounts and bootstrap the deployment process.
+ # instance in the security account, and that instance assumes this IAM role to
+ # get access to all the other child accounts and bootstrap the deployment
+ # process.
security_account_id =
# ----------------------------------------------------------------------------------------------------
@@ -119,11 +120,12 @@ module "gruntwork_access" {
iam_role_name = "GruntworkAccountAccessRole"
# The name of the AWS Managed Policy to attach to the IAM role. To deploy a
- # Reference Architecture, the Gruntwork team needs AdministratorAccess, so this is
- # the default.
+ # Reference Architecture, the Gruntwork team needs AdministratorAccess, so
+ # this is the default.
managed_policy_name = "AdministratorAccess"
- # If set to true, require MFA to assume the IAM role from the Gruntwork account.
+ # If set to true, require MFA to assume the IAM role from the Gruntwork
+ # account.
require_mfa = true
# Tags to apply to all resources created by this module
@@ -144,7 +146,7 @@ module "gruntwork_access" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/gruntwork-access?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/gruntwork-access?ref=v0.104.12"
}
inputs = {
@@ -154,14 +156,15 @@ inputs = {
# ----------------------------------------------------------------------------------------------------
# Set to true to grant your security account, with the account ID specified in
- # var.security_account_id, access to the IAM role. This is required for deploying
- # a Reference Architecture.
+ # var.security_account_id, access to the IAM role. This is required for
+ # deploying a Reference Architecture.
grant_security_account_access =
# The ID of your security account (where IAM users are defined). Required for
# deploying a Reference Architecture, as the Gruntwork team deploys an EC2
- # instance in the security account, and that instance assumes this IAM role to get
- # access to all the other child accounts and bootstrap the deployment process.
+ # instance in the security account, and that instance assumes this IAM role to
+ # get access to all the other child accounts and bootstrap the deployment
+ # process.
security_account_id =
# ----------------------------------------------------------------------------------------------------
@@ -175,11 +178,12 @@ inputs = {
iam_role_name = "GruntworkAccountAccessRole"
# The name of the AWS Managed Policy to attach to the IAM role. To deploy a
- # Reference Architecture, the Gruntwork team needs AdministratorAccess, so this is
- # the default.
+ # Reference Architecture, the Gruntwork team needs AdministratorAccess, so
+ # this is the default.
managed_policy_name = "AdministratorAccess"
- # If set to true, require MFA to assume the IAM role from the Gruntwork account.
+ # If set to true, require MFA to assume the IAM role from the Gruntwork
+ # account.
require_mfa = true
# Tags to apply to all resources created by this module
@@ -292,11 +296,11 @@ The name of the IAM role
diff --git a/docs/reference/services/landing-zone/iam-users-and-iam-groups.md b/docs/reference/services/landing-zone/iam-users-and-iam-groups.md
index ac7284a858..8bafdcb266 100644
--- a/docs/reference/services/landing-zone/iam-users-and-iam-groups.md
+++ b/docs/reference/services/landing-zone/iam-users-and-iam-groups.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# IAM Users and IAM Groups
-View Source
+View Source
Release Notes
@@ -74,9 +74,9 @@ If you’ve never used the Service Catalog before, make sure to read
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -84,7 +84,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing/landingzone): The
+* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing/landingzone): The
`examples/for-learning-and-testing/landingzone` folder contains standalone sample code optimized for learning,
experimenting, and testing (but not direct production usage).
@@ -92,7 +92,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -113,7 +113,7 @@ If you want to deploy this repo in production, check out the following resources
module "iam_users_and_groups" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/iam-users-and-groups?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/iam-users-and-groups?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -127,33 +127,35 @@ module "iam_users_and_groups" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group for
- # doing automated deployments. NOTE: If var.should_create_iam_group_auto_deploy is
- # true, the list must have at least one element (e.g. '*').
+ # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group
+ # for doing automated deployments. NOTE: If
+ # var.should_create_iam_group_auto_deploy is true, the list must have at least
+ # one element (e.g. '*').
auto_deploy_permissions = []
# The ARN of a KMS CMK used to encrypt CloudTrail logs. If set, the logs group
# will include permissions to decrypt using this CMK.
cloudtrail_kms_key_arn = null
- # The name of the IAM group that will grant access to all external AWS accounts in
- # var.iam_groups_for_cross_account_access.
+ # The name of the IAM group that will grant access to all external AWS
+ # accounts in var.iam_groups_for_cross_account_access.
cross_account_access_all_group_name = "_all-accounts"
# A feature flag to enable or disable the IAM Groups module.
enable_iam_groups = true
# When destroying this user, destroy even if it has non-Terraform-managed IAM
- # access keys, login profile, or MFA devices. Without force_destroy a user with
- # non-Terraform-managed access keys and login profile will fail to be destroyed.
+ # access keys, login profile, or MFA devices. Without force_destroy a user
+ # with non-Terraform-managed access keys and login profile will fail to be
+ # destroyed.
force_destroy_users = false
# A list of AWS services for which the developers IAM Group will receive full
# permissions. See https://goo.gl/ZyoHlz to find the IAM Service name. For
- # example, to grant developers access only to EC2 and Amazon Machine Learning, use
- # the value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
- # that will grant Developers de facto admin access. If you need to grant iam
- # privileges, just grant the user Full Access.
+ # example, to grant developers access only to EC2 and Amazon Machine Learning,
+ # use the value ["ec2","machinelearning"]. Do NOT add iam to the list of
+ # services, or that will grant Developers de facto admin access. If you need
+ # to grant iam privileges, just grant the user Full Access.
iam_group_developers_permitted_services = []
# The name of the IAM Group that allows automated deployment by granting the
@@ -164,8 +166,8 @@ module "iam_users_and_groups" {
# billing features in AWS.
iam_group_name_billing = "billing"
- # The name to be used for the IAM Group that grants IAM Users a reasonable set of
- # permissions for developers.
+ # The name to be used for the IAM Group that grants IAM Users a reasonable set
+ # of permissions for developers.
iam_group_name_developers = "developers"
# The name to be used for the IAM Group that grants full access to all AWS
@@ -176,71 +178,73 @@ module "iam_users_and_groups" {
# Effectively grants administrator access.
iam_group_name_iam_admin = "iam-admin"
- # The name to be used for the IAM Group that grants IAM Users the permissions to
- # manage their own IAM User account.
+ # The name to be used for the IAM Group that grants IAM Users the permissions
+ # to manage their own IAM User account.
iam_group_name_iam_user_self_mgmt = "iam-user-self-mgmt"
- # The name to be used for the IAM Group that grants read access to CloudTrail, AWS
- # Config, and CloudWatch in AWS.
+ # The name to be used for the IAM Group that grants read access to CloudTrail,
+ # AWS Config, and CloudWatch in AWS.
iam_group_name_logs = "logs"
- # The name to be used for the IAM Group that grants read-only access to all AWS
- # resources.
+ # The name to be used for the IAM Group that grants read-only access to all
+ # AWS resources.
iam_group_name_read_only = "read-only"
# The name of the IAM Group that allows access to AWS Support.
iam_group_name_support = "support"
- # The name to be used for the IAM Group that grants IAM Users the permissions to
- # use existing IAM Roles when launching AWS Resources. This does NOT grant the
- # permission to create new IAM Roles.
+ # The name to be used for the IAM Group that grants IAM Users the permissions
+ # to use existing IAM Roles when launching AWS Resources. This does NOT grant
+ # the permission to create new IAM Roles.
iam_group_name_use_existing_iam_roles = "use-existing-iam-roles"
- # The list of names to be used for the IAM Group that enables its members to SSH
- # as a sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # The list of names to be used for the IAM Group that enables its members to
+ # SSH as a sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_sudo_users = ["ssh-grunt-sudo-users"]
# The name to be used for the IAM Group that enables its members to SSH as a
- # non-sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # non-sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_users = ["ssh-grunt-users"]
- # This variable is used to create groups that allow IAM users to assume roles in
- # your other AWS accounts. It should be a list of objects, where each object has
- # the fields 'group_name', which will be used as the name of the IAM group, and
- # 'iam_role_arns', which is a list of ARNs of IAM Roles that you can assume when
- # part of that group. For each entry in the list of objects, we will create an IAM
- # group that allows users to assume the given IAM role(s) in the other AWS
- # account. This allows you to define all your IAM users in one account (e.g. the
- # users account) and to grant them access to certain IAM roles in other accounts
- # (e.g. the stage, prod, audit accounts).
+ # This variable is used to create groups that allow IAM users to assume roles
+ # in your other AWS accounts. It should be a list of objects, where each
+ # object has the fields 'group_name', which will be used as the name of the
+ # IAM group, and 'iam_role_arns', which is a list of ARNs of IAM Roles that
+ # you can assume when part of that group. For each entry in the list of
+ # objects, we will create an IAM group that allows users to assume the given
+ # IAM role(s) in the other AWS account. This allows you to define all your IAM
+ # users in one account (e.g. the users account) and to grant them access to
+ # certain IAM roles in other accounts (e.g. the stage, prod, audit accounts).
iam_groups_for_cross_account_access = []
- # The name to be used for the IAM Policy that grants IAM Users the permissions to
- # manage their own IAM User account.
+ # The name to be used for the IAM Policy that grants IAM Users the permissions
+ # to manage their own IAM User account.
iam_policy_iam_user_self_mgmt = "iam-user-self-mgmt"
# The tags to apply to all the IAM role resources.
iam_role_tags = {}
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to all
- # IAM roles created by this module that are intended for people to use, such as
- # allow-read-only-access-from-other-accounts. For IAM roles that are intended for
- # machine users, such as allow-auto-deploy-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for people
+ # to use, such as allow-read-only-access-from-other-accounts. For IAM roles
+ # that are intended for machine users, such as
+ # allow-auto-deploy-from-other-accounts, see
# var.max_session_duration_machine_users.
max_session_duration_human_users = 43200
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to
- # all IAM roles created by this module that are intended for machine users, such
- # as allow-auto-deploy-from-other-accounts. For IAM roles that are intended for
- # human users, such as allow-read-only-access-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for
+ # machine users, such as allow-auto-deploy-from-other-accounts. For IAM roles
+ # that are intended for human users, such as
+ # allow-read-only-access-from-other-accounts, see
# var.max_session_duration_human_users.
max_session_duration_machine_users = 3600
@@ -251,24 +255,25 @@ module "iam_users_and_groups" {
# with create_login_profile set to true.
password_reset_required = true
- # Should we create the IAM Group for auto-deploy? Allows automated deployment by
- # granting the permissions specified in var.auto_deploy_permissions. (true or
- # false)
+ # Should we create the IAM Group for auto-deploy? Allows automated deployment
+ # by granting the permissions specified in var.auto_deploy_permissions. (true
+ # or false)
should_create_iam_group_auto_deploy = false
- # Should we create the IAM Group for billing? Allows read-write access to billing
- # features only. (true or false)
+ # Should we create the IAM Group for billing? Allows read-write access to
+ # billing features only. (true or false)
should_create_iam_group_billing = false
# Should we create the IAM Group for access to all external AWS accounts?
should_create_iam_group_cross_account_access_all = true
- # Should we create the IAM Group for developers? The permissions of that group are
- # specified via var.iam_group_developers_permitted_services. (true or false)
+ # Should we create the IAM Group for developers? The permissions of that group
+ # are specified via var.iam_group_developers_permitted_services. (true or
+ # false)
should_create_iam_group_developers = false
- # Should we create the IAM Group for full access? Allows full access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for full access? Allows full access to all
+ # AWS resources. (true or false)
should_create_iam_group_full_access = true
# Should we create the IAM Group for IAM administrator access? Allows users to
@@ -276,42 +281,42 @@ module "iam_users_and_groups" {
# false)
should_create_iam_group_iam_admin = false
- # Should we create the IAM Group for logs? Allows read access to CloudTrail, AWS
- # Config, and CloudWatch. If var.cloudtrail_kms_key_arn is set, will also give
- # decrypt access to a KMS CMK. (true or false)
+ # Should we create the IAM Group for logs? Allows read access to CloudTrail,
+ # AWS Config, and CloudWatch. If var.cloudtrail_kms_key_arn is set, will also
+ # give decrypt access to a KMS CMK. (true or false)
should_create_iam_group_logs = false
- # Should we create the IAM Group for read-only? Allows read-only access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for read-only? Allows read-only access to all
+ # AWS resources. (true or false)
should_create_iam_group_read_only = false
# Should we create the IAM Group for support? Allows support access
# (AWSSupportAccess). (true or false)
should_create_iam_group_support = false
- # Should we create the IAM Group for use-existing-iam-roles? Allow launching AWS
- # resources with existing IAM Roles, but no ability to create new IAM Roles. (true
- # or false)
+ # Should we create the IAM Group for use-existing-iam-roles? Allow launching
+ # AWS resources with existing IAM Roles, but no ability to create new IAM
+ # Roles. (true or false)
should_create_iam_group_use_existing_iam_roles = false
- # Should we create the IAM Group for user self-management? Allows users to manage
- # their own IAM user accounts, but not other IAM users. (true or false)
+ # Should we create the IAM Group for user self-management? Allows users to
+ # manage their own IAM user accounts, but not other IAM users. (true or false)
should_create_iam_group_user_self_mgmt = true
- # Should we require that all IAM Users use Multi-Factor Authentication for both
- # AWS API calls and the AWS Web Console? (true or false)
+ # Should we require that all IAM Users use Multi-Factor Authentication for
+ # both AWS API calls and the AWS Web Console? (true or false)
should_require_mfa = true
# A map of users to create. The keys are the user names and the values are an
# object with the optional keys 'groups' (a list of IAM groups to add the user
- # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a base-64
- # encoded PGP public key, or a keybase username in the form keybase:username, used
- # to encrypt the user's credentials; required if create_login_profile or
- # create_access_keys is true), 'create_login_profile' (if set to true, create a
- # password to login to the AWS Web Console), 'create_access_keys' (if set to true,
- # create access keys for the user), 'path' (the path), and 'permissions_boundary'
- # (the ARN of the policy that is used to set the permissions boundary for the
- # user).
+ # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a
+ # base-64 encoded PGP public key, or a keybase username in the form
+ # keybase:username, used to encrypt the user's credentials; required if
+ # create_login_profile or create_access_keys is true), 'create_login_profile'
+ # (if set to true, create a password to login to the AWS Web Console),
+ # 'create_access_keys' (if set to true, create access keys for the user),
+ # 'path' (the path), and 'permissions_boundary' (the ARN of the policy that is
+ # used to set the permissions boundary for the user).
users = {}
}
@@ -329,7 +334,7 @@ module "iam_users_and_groups" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/iam-users-and-groups?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/iam-users-and-groups?ref=v0.104.12"
}
inputs = {
@@ -346,33 +351,35 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group for
- # doing automated deployments. NOTE: If var.should_create_iam_group_auto_deploy is
- # true, the list must have at least one element (e.g. '*').
+ # A list of IAM permissions (e.g. ec2:*) that will be added to an IAM Group
+ # for doing automated deployments. NOTE: If
+ # var.should_create_iam_group_auto_deploy is true, the list must have at least
+ # one element (e.g. '*').
auto_deploy_permissions = []
# The ARN of a KMS CMK used to encrypt CloudTrail logs. If set, the logs group
# will include permissions to decrypt using this CMK.
cloudtrail_kms_key_arn = null
- # The name of the IAM group that will grant access to all external AWS accounts in
- # var.iam_groups_for_cross_account_access.
+ # The name of the IAM group that will grant access to all external AWS
+ # accounts in var.iam_groups_for_cross_account_access.
cross_account_access_all_group_name = "_all-accounts"
# A feature flag to enable or disable the IAM Groups module.
enable_iam_groups = true
# When destroying this user, destroy even if it has non-Terraform-managed IAM
- # access keys, login profile, or MFA devices. Without force_destroy a user with
- # non-Terraform-managed access keys and login profile will fail to be destroyed.
+ # access keys, login profile, or MFA devices. Without force_destroy a user
+ # with non-Terraform-managed access keys and login profile will fail to be
+ # destroyed.
force_destroy_users = false
# A list of AWS services for which the developers IAM Group will receive full
# permissions. See https://goo.gl/ZyoHlz to find the IAM Service name. For
- # example, to grant developers access only to EC2 and Amazon Machine Learning, use
- # the value ["ec2","machinelearning"]. Do NOT add iam to the list of services, or
- # that will grant Developers de facto admin access. If you need to grant iam
- # privileges, just grant the user Full Access.
+ # example, to grant developers access only to EC2 and Amazon Machine Learning,
+ # use the value ["ec2","machinelearning"]. Do NOT add iam to the list of
+ # services, or that will grant Developers de facto admin access. If you need
+ # to grant iam privileges, just grant the user Full Access.
iam_group_developers_permitted_services = []
# The name of the IAM Group that allows automated deployment by granting the
@@ -383,8 +390,8 @@ inputs = {
# billing features in AWS.
iam_group_name_billing = "billing"
- # The name to be used for the IAM Group that grants IAM Users a reasonable set of
- # permissions for developers.
+ # The name to be used for the IAM Group that grants IAM Users a reasonable set
+ # of permissions for developers.
iam_group_name_developers = "developers"
# The name to be used for the IAM Group that grants full access to all AWS
@@ -395,71 +402,73 @@ inputs = {
# Effectively grants administrator access.
iam_group_name_iam_admin = "iam-admin"
- # The name to be used for the IAM Group that grants IAM Users the permissions to
- # manage their own IAM User account.
+ # The name to be used for the IAM Group that grants IAM Users the permissions
+ # to manage their own IAM User account.
iam_group_name_iam_user_self_mgmt = "iam-user-self-mgmt"
- # The name to be used for the IAM Group that grants read access to CloudTrail, AWS
- # Config, and CloudWatch in AWS.
+ # The name to be used for the IAM Group that grants read access to CloudTrail,
+ # AWS Config, and CloudWatch in AWS.
iam_group_name_logs = "logs"
- # The name to be used for the IAM Group that grants read-only access to all AWS
- # resources.
+ # The name to be used for the IAM Group that grants read-only access to all
+ # AWS resources.
iam_group_name_read_only = "read-only"
# The name of the IAM Group that allows access to AWS Support.
iam_group_name_support = "support"
- # The name to be used for the IAM Group that grants IAM Users the permissions to
- # use existing IAM Roles when launching AWS Resources. This does NOT grant the
- # permission to create new IAM Roles.
+ # The name to be used for the IAM Group that grants IAM Users the permissions
+ # to use existing IAM Roles when launching AWS Resources. This does NOT grant
+ # the permission to create new IAM Roles.
iam_group_name_use_existing_iam_roles = "use-existing-iam-roles"
- # The list of names to be used for the IAM Group that enables its members to SSH
- # as a sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # The list of names to be used for the IAM Group that enables its members to
+ # SSH as a sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_sudo_users = ["ssh-grunt-sudo-users"]
# The name to be used for the IAM Group that enables its members to SSH as a
- # non-sudo user into any server configured with the ssh-grunt Gruntwork module.
- # Pass in multiple to configure multiple different IAM groups to control different
- # groupings of access at the server level. Pass in empty list to disable creation
- # of the IAM groups.
+ # non-sudo user into any server configured with the ssh-grunt Gruntwork
+ # module. Pass in multiple to configure multiple different IAM groups to
+ # control different groupings of access at the server level. Pass in empty
+ # list to disable creation of the IAM groups.
iam_group_names_ssh_grunt_users = ["ssh-grunt-users"]
- # This variable is used to create groups that allow IAM users to assume roles in
- # your other AWS accounts. It should be a list of objects, where each object has
- # the fields 'group_name', which will be used as the name of the IAM group, and
- # 'iam_role_arns', which is a list of ARNs of IAM Roles that you can assume when
- # part of that group. For each entry in the list of objects, we will create an IAM
- # group that allows users to assume the given IAM role(s) in the other AWS
- # account. This allows you to define all your IAM users in one account (e.g. the
- # users account) and to grant them access to certain IAM roles in other accounts
- # (e.g. the stage, prod, audit accounts).
+ # This variable is used to create groups that allow IAM users to assume roles
+ # in your other AWS accounts. It should be a list of objects, where each
+ # object has the fields 'group_name', which will be used as the name of the
+ # IAM group, and 'iam_role_arns', which is a list of ARNs of IAM Roles that
+ # you can assume when part of that group. For each entry in the list of
+ # objects, we will create an IAM group that allows users to assume the given
+ # IAM role(s) in the other AWS account. This allows you to define all your IAM
+ # users in one account (e.g. the users account) and to grant them access to
+ # certain IAM roles in other accounts (e.g. the stage, prod, audit accounts).
iam_groups_for_cross_account_access = []
- # The name to be used for the IAM Policy that grants IAM Users the permissions to
- # manage their own IAM User account.
+ # The name to be used for the IAM Policy that grants IAM Users the permissions
+ # to manage their own IAM User account.
iam_policy_iam_user_self_mgmt = "iam-user-self-mgmt"
# The tags to apply to all the IAM role resources.
iam_role_tags = {}
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to all
- # IAM roles created by this module that are intended for people to use, such as
- # allow-read-only-access-from-other-accounts. For IAM roles that are intended for
- # machine users, such as allow-auto-deploy-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for people
+ # to use, such as allow-read-only-access-from-other-accounts. For IAM roles
+ # that are intended for machine users, such as
+ # allow-auto-deploy-from-other-accounts, see
# var.max_session_duration_machine_users.
max_session_duration_human_users = 43200
- # The maximum allowable session duration, in seconds, for the credentials you get
- # when assuming the IAM roles created by this module. This variable applies to
- # all IAM roles created by this module that are intended for machine users, such
- # as allow-auto-deploy-from-other-accounts. For IAM roles that are intended for
- # human users, such as allow-read-only-access-from-other-accounts, see
+ # The maximum allowable session duration, in seconds, for the credentials you
+ # get when assuming the IAM roles created by this module. This variable
+ # applies to all IAM roles created by this module that are intended for
+ # machine users, such as allow-auto-deploy-from-other-accounts. For IAM roles
+ # that are intended for human users, such as
+ # allow-read-only-access-from-other-accounts, see
# var.max_session_duration_human_users.
max_session_duration_machine_users = 3600
@@ -470,24 +479,25 @@ inputs = {
# with create_login_profile set to true.
password_reset_required = true
- # Should we create the IAM Group for auto-deploy? Allows automated deployment by
- # granting the permissions specified in var.auto_deploy_permissions. (true or
- # false)
+ # Should we create the IAM Group for auto-deploy? Allows automated deployment
+ # by granting the permissions specified in var.auto_deploy_permissions. (true
+ # or false)
should_create_iam_group_auto_deploy = false
- # Should we create the IAM Group for billing? Allows read-write access to billing
- # features only. (true or false)
+ # Should we create the IAM Group for billing? Allows read-write access to
+ # billing features only. (true or false)
should_create_iam_group_billing = false
# Should we create the IAM Group for access to all external AWS accounts?
should_create_iam_group_cross_account_access_all = true
- # Should we create the IAM Group for developers? The permissions of that group are
- # specified via var.iam_group_developers_permitted_services. (true or false)
+ # Should we create the IAM Group for developers? The permissions of that group
+ # are specified via var.iam_group_developers_permitted_services. (true or
+ # false)
should_create_iam_group_developers = false
- # Should we create the IAM Group for full access? Allows full access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for full access? Allows full access to all
+ # AWS resources. (true or false)
should_create_iam_group_full_access = true
# Should we create the IAM Group for IAM administrator access? Allows users to
@@ -495,42 +505,42 @@ inputs = {
# false)
should_create_iam_group_iam_admin = false
- # Should we create the IAM Group for logs? Allows read access to CloudTrail, AWS
- # Config, and CloudWatch. If var.cloudtrail_kms_key_arn is set, will also give
- # decrypt access to a KMS CMK. (true or false)
+ # Should we create the IAM Group for logs? Allows read access to CloudTrail,
+ # AWS Config, and CloudWatch. If var.cloudtrail_kms_key_arn is set, will also
+ # give decrypt access to a KMS CMK. (true or false)
should_create_iam_group_logs = false
- # Should we create the IAM Group for read-only? Allows read-only access to all AWS
- # resources. (true or false)
+ # Should we create the IAM Group for read-only? Allows read-only access to all
+ # AWS resources. (true or false)
should_create_iam_group_read_only = false
# Should we create the IAM Group for support? Allows support access
# (AWSSupportAccess). (true or false)
should_create_iam_group_support = false
- # Should we create the IAM Group for use-existing-iam-roles? Allow launching AWS
- # resources with existing IAM Roles, but no ability to create new IAM Roles. (true
- # or false)
+ # Should we create the IAM Group for use-existing-iam-roles? Allow launching
+ # AWS resources with existing IAM Roles, but no ability to create new IAM
+ # Roles. (true or false)
should_create_iam_group_use_existing_iam_roles = false
- # Should we create the IAM Group for user self-management? Allows users to manage
- # their own IAM user accounts, but not other IAM users. (true or false)
+ # Should we create the IAM Group for user self-management? Allows users to
+ # manage their own IAM user accounts, but not other IAM users. (true or false)
should_create_iam_group_user_self_mgmt = true
- # Should we require that all IAM Users use Multi-Factor Authentication for both
- # AWS API calls and the AWS Web Console? (true or false)
+ # Should we require that all IAM Users use Multi-Factor Authentication for
+ # both AWS API calls and the AWS Web Console? (true or false)
should_require_mfa = true
# A map of users to create. The keys are the user names and the values are an
# object with the optional keys 'groups' (a list of IAM groups to add the user
- # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a base-64
- # encoded PGP public key, or a keybase username in the form keybase:username, used
- # to encrypt the user's credentials; required if create_login_profile or
- # create_access_keys is true), 'create_login_profile' (if set to true, create a
- # password to login to the AWS Web Console), 'create_access_keys' (if set to true,
- # create access keys for the user), 'path' (the path), and 'permissions_boundary'
- # (the ARN of the policy that is used to set the permissions boundary for the
- # user).
+ # to), 'tags' (a map of tags to apply to the user), 'pgp_key' (either a
+ # base-64 encoded PGP public key, or a keybase username in the form
+ # keybase:username, used to encrypt the user's credentials; required if
+ # create_login_profile or create_access_keys is true), 'create_login_profile'
+ # (if set to true, create a password to login to the AWS Web Console),
+ # 'create_access_keys' (if set to true, create access keys for the user),
+ # 'path' (the path), and 'permissions_boundary' (the ARN of the policy that is
+ # used to set the permissions boundary for the user).
users = {}
}
@@ -1127,11 +1137,11 @@ A map of usernames to that user's AWS Web Console password, encrypted with that
diff --git a/docs/reference/services/networking/elastic-load-balancer-elb.md b/docs/reference/services/networking/elastic-load-balancer-elb.md
index 442597defb..d8e9d8e2f5 100644
--- a/docs/reference/services/networking/elastic-load-balancer-elb.md
+++ b/docs/reference/services/networking/elastic-load-balancer-elb.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Application Load Balancer
-View Source
+View Source
Release Notes
@@ -62,7 +62,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -70,7 +70,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -89,7 +89,7 @@ If you want to deploy this repo in production, check out the following resources
module "alb" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/alb?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/alb?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -98,16 +98,16 @@ module "alb" {
# The name of the ALB.
alb_name =
- # If the ALB should only accept traffic from within the VPC, set this to true. If
- # it should accept traffic from the public Internet, set it to false.
+ # If the ALB should only accept traffic from within the VPC, set this to true.
+ # If it should accept traffic from the public Internet, set it to false.
is_internal_alb =
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
num_days_after_which_archive_log_data =
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
num_days_after_which_delete_log_data =
# ID of the VPC where the ALB will be deployed
@@ -120,8 +120,8 @@ module "alb" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The name to use for the S3 bucket where the ALB access logs will be stored. If
- # you set this to null, a name will be generated automatically based on
+ # The name to use for the S3 bucket where the ALB access logs will be stored.
+ # If you set this to null, a name will be generated automatically based on
# var.alb_name.
access_logs_s3_bucket_name = null
@@ -142,16 +142,16 @@ module "alb" {
# var.https_listener_ports_and_acm_ssl_certs. The keys are the listener ports.
additional_ssl_certs_for_ports = {}
- # Set to true to enable all outbound traffic on this ALB. If set to false, the ALB
- # will allow no outbound traffic by default. This will make the ALB unusuable, so
- # some other code must then update the ALB Security Group to enable outbound
- # access!
+ # Set to true to enable all outbound traffic on this ALB. If set to false, the
+ # ALB will allow no outbound traffic by default. This will make the ALB
+  # unusable, so some other code must then update the ALB Security Group to
+ # enable outbound access!
allow_all_outbound = true
# The CIDR-formatted IP Address range from which this ALB will allow incoming
# requests. If var.is_internal_alb is false, use the default value. If
- # var.is_internal_alb is true, consider setting this to the VPC's CIDR Block, or
- # something even more restrictive.
+ # var.is_internal_alb is true, consider setting this to the VPC's CIDR Block,
+ # or something even more restrictive.
allow_inbound_from_cidr_blocks = []
# The list of IDs of security groups that should have access to the ALB
@@ -160,33 +160,33 @@ module "alb" {
# Set to true to create a Route 53 DNS A record for this ALB?
create_route53_entry = false
- # Prefix to use for access logs to create a sub-folder in S3 Bucket name where ALB
- # logs should be stored. Only used if var.enable_custom_alb_access_logs_s3_prefix
- # is true.
+ # Prefix to use for access logs to create a sub-folder in S3 Bucket name where
+ # ALB logs should be stored. Only used if
+ # var.enable_custom_alb_access_logs_s3_prefix is true.
custom_alb_access_logs_s3_prefix = null
- # A map of custom tags to apply to the ALB and its Security Group. The key is the
- # tag name and the value is the tag value.
+ # A map of custom tags to apply to the ALB and its Security Group. The key is
+ # the tag name and the value is the tag value.
custom_tags = {}
- # If a request to the load balancer does not match any of your listener rules, the
- # default action will return a fixed response with this body.
+ # If a request to the load balancer does not match any of your listener rules,
+ # the default action will return a fixed response with this body.
default_action_body = null
- # If a request to the load balancer does not match any of your listener rules, the
- # default action will return a fixed response with this content type.
+ # If a request to the load balancer does not match any of your listener rules,
+ # the default action will return a fixed response with this content type.
default_action_content_type = "text/plain"
- # If a request to the load balancer does not match any of your listener rules, the
- # default action will return a fixed response with this status code.
+ # If a request to the load balancer does not match any of your listener rules,
+ # the default action will return a fixed response with this status code.
default_action_status_code = 404
# The list of domain names for the DNS A record to add for the ALB (e.g.
# alb.foo.com). Only used if var.create_route53_entry is true.
domain_names = []
- # If true, the ALB will drop invalid headers. Elastic Load Balancing requires that
- # message header names contain only alphanumeric characters and hyphens.
+ # If true, the ALB will drop invalid headers. Elastic Load Balancing requires
+ # that message header names contain only alphanumeric characters and hyphens.
drop_invalid_header_fields = false
# Set to true to use the value of alb_access_logs_s3_prefix for access logs
@@ -199,65 +199,65 @@ module "alb" {
enable_deletion_protection = false
# A boolean that indicates whether the access logs bucket should be destroyed,
- # even if there are files in it, when you run Terraform destroy. Unless you are
- # using this bucket only for test purposes, you'll want to leave this variable set
- # to false.
+ # even if there are files in it, when you run Terraform destroy. Unless you
+ # are using this bucket only for test purposes, you'll want to leave this
+ # variable set to false.
force_destroy = false
- # The ID of the hosted zone for the DNS A record to add for the ALB. Only used if
- # var.create_route53_entry is true.
+ # The ID of the hosted zone for the DNS A record to add for the ALB. Only used
+ # if var.create_route53_entry is true.
hosted_zone_id = null
- # A list of ports for which an HTTP Listener should be created on the ALB. Tip:
- # When you define Listener Rules for these Listeners, be sure that, for each
- # Listener, at least one Listener Rule uses the '*' path to ensure that every
- # possible request path for that Listener is handled by a Listener Rule. Otherwise
- # some requests won't route to any Target Group.
+ # A list of ports for which an HTTP Listener should be created on the ALB.
+ # Tip: When you define Listener Rules for these Listeners, be sure that, for
+ # each Listener, at least one Listener Rule uses the '*' path to ensure that
+ # every possible request path for that Listener is handled by a Listener Rule.
+ # Otherwise some requests won't route to any Target Group.
http_listener_ports = []
- # A list of the ports for which an HTTPS Listener should be created on the ALB.
- # Each item in the list should be a map with the keys 'port', the port number to
- # listen on, and 'tls_domain_name', the domain name of an SSL/TLS certificate
- # issued by the Amazon Certificate Manager (ACM) to associate with the Listener to
- # be created. If your certificate isn't issued by ACM, specify
- # var.https_listener_ports_and_ssl_certs instead. Tip: When you define Listener
- # Rules for these Listeners, be sure that, for each Listener, at least one
- # Listener Rule uses the '*' path to ensure that every possible request path for
- # that Listener is handled by a Listener Rule. Otherwise some requests won't route
- # to any Target Group.
+ # A list of the ports for which an HTTPS Listener should be created on the
+ # ALB. Each item in the list should be a map with the keys 'port', the port
+ # number to listen on, and 'tls_domain_name', the domain name of an SSL/TLS
+ # certificate issued by the Amazon Certificate Manager (ACM) to associate with
+ # the Listener to be created. If your certificate isn't issued by ACM, specify
+ # var.https_listener_ports_and_ssl_certs instead. Tip: When you define
+ # Listener Rules for these Listeners, be sure that, for each Listener, at
+ # least one Listener Rule uses the '*' path to ensure that every possible
+ # request path for that Listener is handled by a Listener Rule. Otherwise some
+ # requests won't route to any Target Group.
https_listener_ports_and_acm_ssl_certs = []
- # A list of the ports for which an HTTPS Listener should be created on the ALB.
- # Each item in the list should be a map with the keys 'port', the port number to
- # listen on, and 'tls_arn', the Amazon Resource Name (ARN) of the SSL/TLS
- # certificate to associate with the Listener to be created. If your certificate is
- # issued by the Amazon Certificate Manager (ACM), specify
+ # A list of the ports for which an HTTPS Listener should be created on the
+ # ALB. Each item in the list should be a map with the keys 'port', the port
+ # number to listen on, and 'tls_arn', the Amazon Resource Name (ARN) of the
+ # SSL/TLS certificate to associate with the Listener to be created. If your
+ # certificate is issued by the Amazon Certificate Manager (ACM), specify
# var.https_listener_ports_and_acm_ssl_certs instead. Tip: When you define
- # Listener Rules for these Listeners, be sure that, for each Listener, at least
- # one Listener Rule uses the '*' path to ensure that every possible request path
- # for that Listener is handled by a Listener Rule. Otherwise some requests won't
- # route to any Target Group.
+ # Listener Rules for these Listeners, be sure that, for each Listener, at
+ # least one Listener Rule uses the '*' path to ensure that every possible
+ # request path for that Listener is handled by a Listener Rule. Otherwise some
+ # requests won't route to any Target Group.
https_listener_ports_and_ssl_certs = []
- # The time in seconds that the client TCP connection to the ALB is allowed to be
- # idle before the ALB closes the TCP connection.
+ # The time in seconds that the client TCP connection to the ALB is allowed to
+ # be idle before the ALB closes the TCP connection.
idle_timeout = 60
# If true, create a new S3 bucket for access logs with the name in
- # var.access_logs_s3_bucket_name. If false, assume the S3 bucket for access logs
- # with the name in var.access_logs_s3_bucket_name already exists, and don't
- # create a new one. Note that if you set this to false, it's up to you to ensure
- # that the S3 bucket has a bucket policy that grants Elastic Load Balancing
- # permission to write the access logs to your bucket.
+ # var.access_logs_s3_bucket_name. If false, assume the S3 bucket for access
+ # logs with the name in var.access_logs_s3_bucket_name already exists, and
+ # don't create a new one. Note that if you set this to false, it's up to you
+ # to ensure that the S3 bucket has a bucket policy that grants Elastic Load
+ # Balancing permission to write the access logs to your bucket.
should_create_access_logs_bucket = true
- # The AWS predefined TLS/SSL policy for the ALB. A List of policies can be found
- # here:
- # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https
- # listener.html#describe-ssl-policies. AWS recommends ELBSecurityPolicy-2016-08
- # policy for general use but this policy includes TLSv1.0 which is rapidly being
- # phased out. ELBSecurityPolicy-TLS-1-1-2017-01 is the next policy up that doesn't
- # include TLSv1.0.
+  # The AWS predefined TLS/SSL policy for the ALB. A list of policies can be
+ # found here:
+ # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies.
+ # AWS recommends ELBSecurityPolicy-2016-08 policy for general use but this
+ # policy includes TLSv1.0 which is rapidly being phased out.
+ # ELBSecurityPolicy-TLS-1-1-2017-01 is the next policy up that doesn't include
+ # TLSv1.0.
ssl_policy = "ELBSecurityPolicy-2016-08"
}
@@ -275,7 +275,7 @@ module "alb" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/alb?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/alb?ref=v0.104.12"
}
inputs = {
@@ -287,16 +287,16 @@ inputs = {
# The name of the ALB.
alb_name =
- # If the ALB should only accept traffic from within the VPC, set this to true. If
- # it should accept traffic from the public Internet, set it to false.
+ # If the ALB should only accept traffic from within the VPC, set this to true.
+ # If it should accept traffic from the public Internet, set it to false.
is_internal_alb =
- # After this number of days, log files should be transitioned from S3 to Glacier.
- # Enter 0 to never archive log data.
+ # After this number of days, log files should be transitioned from S3 to
+ # Glacier. Enter 0 to never archive log data.
num_days_after_which_archive_log_data =
- # After this number of days, log files should be deleted from S3. Enter 0 to never
- # delete log data.
+ # After this number of days, log files should be deleted from S3. Enter 0 to
+ # never delete log data.
num_days_after_which_delete_log_data =
# ID of the VPC where the ALB will be deployed
@@ -309,8 +309,8 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The name to use for the S3 bucket where the ALB access logs will be stored. If
- # you set this to null, a name will be generated automatically based on
+ # The name to use for the S3 bucket where the ALB access logs will be stored.
+ # If you set this to null, a name will be generated automatically based on
# var.alb_name.
access_logs_s3_bucket_name = null
@@ -331,16 +331,16 @@ inputs = {
# var.https_listener_ports_and_acm_ssl_certs. The keys are the listener ports.
additional_ssl_certs_for_ports = {}
- # Set to true to enable all outbound traffic on this ALB. If set to false, the ALB
- # will allow no outbound traffic by default. This will make the ALB unusuable, so
- # some other code must then update the ALB Security Group to enable outbound
- # access!
+ # Set to true to enable all outbound traffic on this ALB. If set to false, the
+ # ALB will allow no outbound traffic by default. This will make the ALB
+  # unusable, so some other code must then update the ALB Security Group to
+ # enable outbound access!
allow_all_outbound = true
# The CIDR-formatted IP Address range from which this ALB will allow incoming
# requests. If var.is_internal_alb is false, use the default value. If
- # var.is_internal_alb is true, consider setting this to the VPC's CIDR Block, or
- # something even more restrictive.
+ # var.is_internal_alb is true, consider setting this to the VPC's CIDR Block,
+ # or something even more restrictive.
allow_inbound_from_cidr_blocks = []
# The list of IDs of security groups that should have access to the ALB
@@ -349,33 +349,33 @@ inputs = {
# Set to true to create a Route 53 DNS A record for this ALB?
create_route53_entry = false
- # Prefix to use for access logs to create a sub-folder in S3 Bucket name where ALB
- # logs should be stored. Only used if var.enable_custom_alb_access_logs_s3_prefix
- # is true.
+ # Prefix to use for access logs to create a sub-folder in S3 Bucket name where
+ # ALB logs should be stored. Only used if
+ # var.enable_custom_alb_access_logs_s3_prefix is true.
custom_alb_access_logs_s3_prefix = null
- # A map of custom tags to apply to the ALB and its Security Group. The key is the
- # tag name and the value is the tag value.
+ # A map of custom tags to apply to the ALB and its Security Group. The key is
+ # the tag name and the value is the tag value.
custom_tags = {}
- # If a request to the load balancer does not match any of your listener rules, the
- # default action will return a fixed response with this body.
+ # If a request to the load balancer does not match any of your listener rules,
+ # the default action will return a fixed response with this body.
default_action_body = null
- # If a request to the load balancer does not match any of your listener rules, the
- # default action will return a fixed response with this content type.
+ # If a request to the load balancer does not match any of your listener rules,
+ # the default action will return a fixed response with this content type.
default_action_content_type = "text/plain"
- # If a request to the load balancer does not match any of your listener rules, the
- # default action will return a fixed response with this status code.
+ # If a request to the load balancer does not match any of your listener rules,
+ # the default action will return a fixed response with this status code.
default_action_status_code = 404
# The list of domain names for the DNS A record to add for the ALB (e.g.
# alb.foo.com). Only used if var.create_route53_entry is true.
domain_names = []
- # If true, the ALB will drop invalid headers. Elastic Load Balancing requires that
- # message header names contain only alphanumeric characters and hyphens.
+ # If true, the ALB will drop invalid headers. Elastic Load Balancing requires
+ # that message header names contain only alphanumeric characters and hyphens.
drop_invalid_header_fields = false
# Set to true to use the value of alb_access_logs_s3_prefix for access logs
@@ -388,65 +388,65 @@ inputs = {
enable_deletion_protection = false
# A boolean that indicates whether the access logs bucket should be destroyed,
- # even if there are files in it, when you run Terraform destroy. Unless you are
- # using this bucket only for test purposes, you'll want to leave this variable set
- # to false.
+ # even if there are files in it, when you run Terraform destroy. Unless you
+ # are using this bucket only for test purposes, you'll want to leave this
+ # variable set to false.
force_destroy = false
- # The ID of the hosted zone for the DNS A record to add for the ALB. Only used if
- # var.create_route53_entry is true.
+ # The ID of the hosted zone for the DNS A record to add for the ALB. Only used
+ # if var.create_route53_entry is true.
hosted_zone_id = null
- # A list of ports for which an HTTP Listener should be created on the ALB. Tip:
- # When you define Listener Rules for these Listeners, be sure that, for each
- # Listener, at least one Listener Rule uses the '*' path to ensure that every
- # possible request path for that Listener is handled by a Listener Rule. Otherwise
- # some requests won't route to any Target Group.
+ # A list of ports for which an HTTP Listener should be created on the ALB.
+ # Tip: When you define Listener Rules for these Listeners, be sure that, for
+ # each Listener, at least one Listener Rule uses the '*' path to ensure that
+ # every possible request path for that Listener is handled by a Listener Rule.
+ # Otherwise some requests won't route to any Target Group.
http_listener_ports = []
- # A list of the ports for which an HTTPS Listener should be created on the ALB.
- # Each item in the list should be a map with the keys 'port', the port number to
- # listen on, and 'tls_domain_name', the domain name of an SSL/TLS certificate
- # issued by the Amazon Certificate Manager (ACM) to associate with the Listener to
- # be created. If your certificate isn't issued by ACM, specify
- # var.https_listener_ports_and_ssl_certs instead. Tip: When you define Listener
- # Rules for these Listeners, be sure that, for each Listener, at least one
- # Listener Rule uses the '*' path to ensure that every possible request path for
- # that Listener is handled by a Listener Rule. Otherwise some requests won't route
- # to any Target Group.
+ # A list of the ports for which an HTTPS Listener should be created on the
+ # ALB. Each item in the list should be a map with the keys 'port', the port
+ # number to listen on, and 'tls_domain_name', the domain name of an SSL/TLS
+ # certificate issued by the Amazon Certificate Manager (ACM) to associate with
+ # the Listener to be created. If your certificate isn't issued by ACM, specify
+ # var.https_listener_ports_and_ssl_certs instead. Tip: When you define
+ # Listener Rules for these Listeners, be sure that, for each Listener, at
+ # least one Listener Rule uses the '*' path to ensure that every possible
+ # request path for that Listener is handled by a Listener Rule. Otherwise some
+ # requests won't route to any Target Group.
https_listener_ports_and_acm_ssl_certs = []
- # A list of the ports for which an HTTPS Listener should be created on the ALB.
- # Each item in the list should be a map with the keys 'port', the port number to
- # listen on, and 'tls_arn', the Amazon Resource Name (ARN) of the SSL/TLS
- # certificate to associate with the Listener to be created. If your certificate is
- # issued by the Amazon Certificate Manager (ACM), specify
+ # A list of the ports for which an HTTPS Listener should be created on the
+ # ALB. Each item in the list should be a map with the keys 'port', the port
+ # number to listen on, and 'tls_arn', the Amazon Resource Name (ARN) of the
+ # SSL/TLS certificate to associate with the Listener to be created. If your
+ # certificate is issued by the Amazon Certificate Manager (ACM), specify
# var.https_listener_ports_and_acm_ssl_certs instead. Tip: When you define
- # Listener Rules for these Listeners, be sure that, for each Listener, at least
- # one Listener Rule uses the '*' path to ensure that every possible request path
- # for that Listener is handled by a Listener Rule. Otherwise some requests won't
- # route to any Target Group.
+ # Listener Rules for these Listeners, be sure that, for each Listener, at
+ # least one Listener Rule uses the '*' path to ensure that every possible
+ # request path for that Listener is handled by a Listener Rule. Otherwise some
+ # requests won't route to any Target Group.
https_listener_ports_and_ssl_certs = []
- # The time in seconds that the client TCP connection to the ALB is allowed to be
- # idle before the ALB closes the TCP connection.
+ # The time in seconds that the client TCP connection to the ALB is allowed to
+ # be idle before the ALB closes the TCP connection.
idle_timeout = 60
# If true, create a new S3 bucket for access logs with the name in
- # var.access_logs_s3_bucket_name. If false, assume the S3 bucket for access logs
- # with the name in var.access_logs_s3_bucket_name already exists, and don't
- # create a new one. Note that if you set this to false, it's up to you to ensure
- # that the S3 bucket has a bucket policy that grants Elastic Load Balancing
- # permission to write the access logs to your bucket.
+ # var.access_logs_s3_bucket_name. If false, assume the S3 bucket for access
+ # logs with the name in var.access_logs_s3_bucket_name already exists, and
+ # don't create a new one. Note that if you set this to false, it's up to you
+ # to ensure that the S3 bucket has a bucket policy that grants Elastic Load
+ # Balancing permission to write the access logs to your bucket.
should_create_access_logs_bucket = true
- # The AWS predefined TLS/SSL policy for the ALB. A List of policies can be found
- # here:
- # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https
- # listener.html#describe-ssl-policies. AWS recommends ELBSecurityPolicy-2016-08
- # policy for general use but this policy includes TLSv1.0 which is rapidly being
- # phased out. ELBSecurityPolicy-TLS-1-1-2017-01 is the next policy up that doesn't
- # include TLSv1.0.
+  # The AWS predefined TLS/SSL policy for the ALB. A list of policies can be
+ # found here:
+ # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies.
+ # AWS recommends ELBSecurityPolicy-2016-08 policy for general use but this
+ # policy includes TLSv1.0 which is rapidly being phased out.
+ # ELBSecurityPolicy-TLS-1-1-2017-01 is the next policy up that doesn't include
+ # TLSv1.0.
ssl_policy = "ELBSecurityPolicy-2016-08"
}
@@ -918,11 +918,11 @@ The AWS-managed DNS name assigned to the ALB.
diff --git a/docs/reference/services/networking/management-vpc.md b/docs/reference/services/networking/management-vpc.md
index c899e6ce82..052472f412 100644
--- a/docs/reference/services/networking/management-vpc.md
+++ b/docs/reference/services/networking/management-vpc.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Management VPC
-View Source
+View Source
Release Notes
@@ -65,9 +65,9 @@ documentation in the [terraform-aws-vpc](https://github.com/gruntwork-io/terrafo
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -75,7 +75,7 @@ documentation in the [terraform-aws-vpc](https://github.com/gruntwork-io/terrafo
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -83,7 +83,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized or direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -105,7 +105,7 @@ If you want to deploy this repo in production, check out the following resources
module "vpc_mgmt" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/vpc-mgmt?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/vpc-mgmt?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -119,9 +119,9 @@ module "vpc_mgmt" {
# '10.100.0.0/16', '10.200.0.0/16', etc.
cidr_block =
- # The number of NAT Gateways to launch for this VPC. The management VPC defaults
- # to 1 NAT Gateway to save on cost, but to increase redundancy, you can adjust
- # this to add additional NAT Gateways.
+ # The number of NAT Gateways to launch for this VPC. The management VPC
+ # defaults to 1 NAT Gateway to save on cost, but to increase redundancy, you
+ # can adjust this to add additional NAT Gateways.
num_nat_gateways =
# The name of the VPC. Defaults to mgmt.
@@ -131,18 +131,18 @@ module "vpc_mgmt" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # If true, will apply the default NACL rules in var.default_nacl_ingress_rules and
- # var.default_nacl_egress_rules on the default NACL of the VPC. Note that every
- # VPC must have a default NACL - when this is false, the original default NACL
- # rules managed by AWS will be used.
+ # If true, will apply the default NACL rules in var.default_nacl_ingress_rules
+ # and var.default_nacl_egress_rules on the default NACL of the VPC. Note that
+ # every VPC must have a default NACL - when this is false, the original
+ # default NACL rules managed by AWS will be used.
apply_default_nacl_rules = false
- # If true, will associate the default NACL to the public, private, and persistence
- # subnets created by this module. Only used if var.apply_default_nacl_rules is
- # true. Note that this does not guarantee that the subnets are associated with the
- # default NACL. Subnets can only be associated with a single NACL. The default
- # NACL association will be dropped if the subnets are associated with a custom
- # NACL later.
+ # If true, will associate the default NACL to the public, private, and
+ # persistence subnets created by this module. Only used if
+ # var.apply_default_nacl_rules is true. Note that this does not guarantee that
+ # the subnets are associated with the default NACL. Subnets can only be
+ # associated with a single NACL. The default NACL association will be dropped
+ # if the subnets are associated with a custom NACL later.
associate_default_nacl_to_subnets = true
# List of excluded Availability Zone IDs.
@@ -151,142 +151,142 @@ module "vpc_mgmt" {
# List of excluded Availability Zone names.
availability_zone_exclude_names = []
- # Allows to filter list of Availability Zones based on their current state. Can be
- # either "available", "information", "impaired" or "unavailable". By default the
- # list includes a complete set of Availability Zones to which the underlying AWS
- # account has access, regardless of their state.
+  # Allows filtering the list of Availability Zones based on their current
+  # state. Can be either "available", "information", "impaired" or
+  # "unavailable". By default the list includes a complete set of Availability
+  # Zones to which the underlying AWS account has access, regardless of their state.
availability_zone_state = null
# If you set this variable to false, this module will not create VPC Flow Logs
- # resources. This is used as a workaround because Terraform does not allow you to
- # use the 'count' parameter on modules. By using this parameter, you can
+ # resources. This is used as a workaround because Terraform does not allow you
+ # to use the 'count' parameter on modules. By using this parameter, you can
# optionally create or not create the resources within this module.
create_flow_logs = true
- # If set to false, this module will NOT create Network ACLs. This is useful if you
- # don't want to use Network ACLs or you want to provide your own Network ACLs
- # outside of this module.
+ # If set to false, this module will NOT create Network ACLs. This is useful if
+ # you don't want to use Network ACLs or you want to provide your own Network
+ # ACLs outside of this module.
create_network_acls = true
- # A map of tags to apply to the VPC, Subnets, Route Tables, and Internet Gateway.
- # The key is the tag name and the value is the tag value. Note that the tag 'Name'
- # is automatically added by this module but may be optionally overwritten by this
- # variable.
+ # A map of tags to apply to the VPC, Subnets, Route Tables, and Internet
+ # Gateway. The key is the tag name and the value is the tag value. Note that
+ # the tag 'Name' is automatically added by this module but may be optionally
+ # overwritten by this variable.
custom_tags = {}
# A map of tags to apply just to the VPC itself, but not any of the other
# resources. The key is the tag name and the value is the tag value. Note that
- # tags defined here will override tags defined as custom_tags in case of conflict.
+ # tags defined here will override tags defined as custom_tags in case of
+ # conflict.
custom_tags_vpc_only = {}
- # The egress rules to apply to the default NACL in the VPC. This is the security
- # group that is used by any subnet that doesn't have its own NACL attached. The
- # value for this variable must be a map where the keys are a unique name for each
- # rule and the values are objects with the same fields as the egress block in the
- # aws_default_network_acl resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_network_acl.
+ # The egress rules to apply to the default NACL in the VPC. This is the
+ # security group that is used by any subnet that doesn't have its own NACL
+ # attached. The value for this variable must be a map where the keys are a
+ # unique name for each rule and the values are objects with the same fields as
+ # the egress block in the aws_default_network_acl resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_network_acl.
default_nacl_egress_rules = {"AllowAll":{"action":"allow","cidr_block":"0.0.0.0/0","from_port":0,"protocol":"-1","rule_no":100,"to_port":0}}
- # The ingress rules to apply to the default NACL in the VPC. This is the NACL that
- # is used by any subnet that doesn't have its own NACL attached. The value for
- # this variable must be a map where the keys are a unique name for each rule and
- # the values are objects with the same fields as the ingress block in the
- # aws_default_network_acl resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_network_acl.
+ # The ingress rules to apply to the default NACL in the VPC. This is the NACL
+ # that is used by any subnet that doesn't have its own NACL attached. The
+ # value for this variable must be a map where the keys are a unique name for
+ # each rule and the values are objects with the same fields as the ingress
+ # block in the aws_default_network_acl resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_network_acl.
default_nacl_ingress_rules = {"AllowAll":{"action":"allow","cidr_block":"0.0.0.0/0","from_port":0,"protocol":"-1","rule_no":100,"to_port":0}}
- # The egress rules to apply to the default security group in the VPC. This is the
- # security group that is used by any resource that doesn't have its own security
- # group attached. The value for this variable must be a map where the keys are a
- # unique name for each rule and the values are objects with the same fields as the
- # egress block in the aws_default_security_group resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_security_group#egress-block.
+ # The egress rules to apply to the default security group in the VPC. This is
+ # the security group that is used by any resource that doesn't have its own
+ # security group attached. The value for this variable must be a map where the
+ # keys are a unique name for each rule and the values are objects with the
+ # same fields as the egress block in the aws_default_security_group resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_security_group#egress-block.
default_security_group_egress_rules = {"AllowAllOutbound":{"cidr_blocks":["0.0.0.0/0"],"from_port":0,"ipv6_cidr_blocks":["::/0"],"protocol":"-1","to_port":0}}
- # The ingress rules to apply to the default security group in the VPC. This is the
- # security group that is used by any resource that doesn't have its own security
- # group attached. The value for this variable must be a map where the keys are a
- # unique name for each rule and the values are objects with the same fields as the
- # ingress block in the aws_default_security_group resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_security_group#ingress-block.
+ # The ingress rules to apply to the default security group in the VPC. This is
+ # the security group that is used by any resource that doesn't have its own
+ # security group attached. The value for this variable must be a map where the
+ # keys are a unique name for each rule and the values are objects with the
+ # same fields as the ingress block in the aws_default_security_group resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_security_group#ingress-block.
default_security_group_ingress_rules = {"AllowAllFromSelf":{"from_port":0,"protocol":"-1","self":true,"to_port":0}}
# If set to false, the default security groups will NOT be created.
enable_default_security_group = false
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role.
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role.
iam_role_permissions_boundary = null
- # The ARN of a KMS key to use for encrypting VPC the flow log. A new KMS key will
- # be created if this is not supplied.
+  # The ARN of a KMS key to use for encrypting the VPC flow log. A new KMS key
+ # will be created if this is not supplied.
kms_key_arn = null
- # The number of days to retain this KMS Key (a Customer Master Key) after it has
- # been marked for deletion. Setting to null defaults to the provider default,
- # which is the maximum possible value (30 days).
+ # The number of days to retain this KMS Key (a Customer Master Key) after it
+ # has been marked for deletion. Setting to null defaults to the provider
+ # default, which is the maximum possible value (30 days).
kms_key_deletion_window_in_days = null
- # VPC Flow Logs will be encrypted with a KMS Key (a Customer Master Key). The IAM
- # Users specified in this list will have access to this key.
+ # VPC Flow Logs will be encrypted with a KMS Key (a Customer Master Key). The
+ # IAM Users specified in this list will have access to this key.
kms_key_user_iam_arns = null
- # A map of tags to apply to the NAT gateways, on top of the custom_tags. The key
- # is the tag name and the value is the tag value. Note that tags defined here will
- # override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the NAT gateways, on top of the custom_tags. The
+ # key is the tag name and the value is the tag value. Note that tags defined
+ # here will override tags defined as custom_tags in case of conflict.
nat_gateway_custom_tags = {}
- # How many AWS Availability Zones (AZs) to use. One subnet of each type (public,
- # private app) will be created in each AZ. Note that this must be less than or
- # equal to the total number of AZs in a region. A value of null means all AZs
- # should be used. For example, if you specify 3 in a region with 5 AZs, subnets
- # will be created in just 3 AZs instead of all 5. Defaults to 3.
+ # How many AWS Availability Zones (AZs) to use. One subnet of each type
+ # (public, private app) will be created in each AZ. Note that this must be
+ # less than or equal to the total number of AZs in a region. A value of null
+ # means all AZs should be used. For example, if you specify 3 in a region with
+ # 5 AZs, subnets will be created in just 3 AZs instead of all 5. Defaults to
+ # 3.
num_availability_zones = null
# Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
+ # ranges. MAKE SURE if you change this you also change the CIDR spacing or
+ # you may hit errors. See cidrsubnet interpolation in terraform config for
+ # more information.
private_subnet_bits = 4
- # A map listing the specific CIDR blocks desired for each private subnet. The key
- # must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of Availability
- # Zones. If left blank, we will compute a reasonable CIDR block for each subnet.
+ # A map listing the specific CIDR blocks desired for each private subnet. The
+ # key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
+ # Availability Zones. If left blank, we will compute a reasonable CIDR block
+ # for each subnet.
private_subnet_cidr_blocks = {}
- # A map of tags to apply to the private Subnet, on top of the custom_tags. The key
- # is the tag name and the value is the tag value. Note that tags defined here will
- # override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the private Subnet, on top of the custom_tags. The
+ # key is the tag name and the value is the tag value. Note that tags defined
+ # here will override tags defined as custom_tags in case of conflict.
private_subnet_custom_tags = {}
# Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
+ # ranges. MAKE SURE if you change this you also change the CIDR spacing or
+ # you may hit errors. See cidrsubnet interpolation in terraform config for
+ # more information.
public_subnet_bits = 4
- # A map listing the specific CIDR blocks desired for each public subnet. The key
- # must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of Availability
- # Zones. If left blank, we will compute a reasonable CIDR block for each subnet.
+ # A map listing the specific CIDR blocks desired for each public subnet. The
+ # key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
+ # Availability Zones. If left blank, we will compute a reasonable CIDR block
+ # for each subnet.
public_subnet_cidr_blocks = {}
- # A map of tags to apply to the public Subnet, on top of the custom_tags. The key
- # is the tag name and the value is the tag value. Note that tags defined here will
- # override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the public Subnet, on top of the custom_tags. The
+ # key is the tag name and the value is the tag value. Note that tags defined
+ # here will override tags defined as custom_tags in case of conflict.
public_subnet_custom_tags = {}
# The amount of spacing between the different subnet types
subnet_spacing = 8
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
}
@@ -304,7 +304,7 @@ module "vpc_mgmt" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/vpc-mgmt?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/vpc-mgmt?ref=v0.104.12"
}
inputs = {
@@ -321,9 +321,9 @@ inputs = {
# '10.100.0.0/16', '10.200.0.0/16', etc.
cidr_block =
- # The number of NAT Gateways to launch for this VPC. The management VPC defaults
- # to 1 NAT Gateway to save on cost, but to increase redundancy, you can adjust
- # this to add additional NAT Gateways.
+ # The number of NAT Gateways to launch for this VPC. The management VPC
+ # defaults to 1 NAT Gateway to save on cost, but to increase redundancy, you
+ # can adjust this to add additional NAT Gateways.
num_nat_gateways =
# The name of the VPC. Defaults to mgmt.
@@ -333,18 +333,18 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # If true, will apply the default NACL rules in var.default_nacl_ingress_rules and
- # var.default_nacl_egress_rules on the default NACL of the VPC. Note that every
- # VPC must have a default NACL - when this is false, the original default NACL
- # rules managed by AWS will be used.
+ # If true, will apply the default NACL rules in var.default_nacl_ingress_rules
+ # and var.default_nacl_egress_rules on the default NACL of the VPC. Note that
+ # every VPC must have a default NACL - when this is false, the original
+ # default NACL rules managed by AWS will be used.
apply_default_nacl_rules = false
- # If true, will associate the default NACL to the public, private, and persistence
- # subnets created by this module. Only used if var.apply_default_nacl_rules is
- # true. Note that this does not guarantee that the subnets are associated with the
- # default NACL. Subnets can only be associated with a single NACL. The default
- # NACL association will be dropped if the subnets are associated with a custom
- # NACL later.
+ # If true, will associate the default NACL to the public, private, and
+ # persistence subnets created by this module. Only used if
+ # var.apply_default_nacl_rules is true. Note that this does not guarantee that
+ # the subnets are associated with the default NACL. Subnets can only be
+ # associated with a single NACL. The default NACL association will be dropped
+ # if the subnets are associated with a custom NACL later.
associate_default_nacl_to_subnets = true
# List of excluded Availability Zone IDs.
@@ -353,142 +353,142 @@ inputs = {
# List of excluded Availability Zone names.
availability_zone_exclude_names = []
- # Allows to filter list of Availability Zones based on their current state. Can be
- # either "available", "information", "impaired" or "unavailable". By default the
- # list includes a complete set of Availability Zones to which the underlying AWS
- # account has access, regardless of their state.
+  # Allows filtering the list of Availability Zones based on their current
+  # state. Can be either "available", "information", "impaired" or
+  # "unavailable". By default the list includes a complete set of Availability
+  # Zones to which the underlying AWS account has access, regardless of their state.
availability_zone_state = null
# If you set this variable to false, this module will not create VPC Flow Logs
- # resources. This is used as a workaround because Terraform does not allow you to
- # use the 'count' parameter on modules. By using this parameter, you can
+ # resources. This is used as a workaround because Terraform does not allow you
+ # to use the 'count' parameter on modules. By using this parameter, you can
# optionally create or not create the resources within this module.
create_flow_logs = true
- # If set to false, this module will NOT create Network ACLs. This is useful if you
- # don't want to use Network ACLs or you want to provide your own Network ACLs
- # outside of this module.
+ # If set to false, this module will NOT create Network ACLs. This is useful if
+ # you don't want to use Network ACLs or you want to provide your own Network
+ # ACLs outside of this module.
create_network_acls = true
- # A map of tags to apply to the VPC, Subnets, Route Tables, and Internet Gateway.
- # The key is the tag name and the value is the tag value. Note that the tag 'Name'
- # is automatically added by this module but may be optionally overwritten by this
- # variable.
+ # A map of tags to apply to the VPC, Subnets, Route Tables, and Internet
+ # Gateway. The key is the tag name and the value is the tag value. Note that
+ # the tag 'Name' is automatically added by this module but may be optionally
+ # overwritten by this variable.
custom_tags = {}
# A map of tags to apply just to the VPC itself, but not any of the other
# resources. The key is the tag name and the value is the tag value. Note that
- # tags defined here will override tags defined as custom_tags in case of conflict.
+ # tags defined here will override tags defined as custom_tags in case of
+ # conflict.
custom_tags_vpc_only = {}
- # The egress rules to apply to the default NACL in the VPC. This is the security
- # group that is used by any subnet that doesn't have its own NACL attached. The
- # value for this variable must be a map where the keys are a unique name for each
- # rule and the values are objects with the same fields as the egress block in the
- # aws_default_network_acl resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_network_acl.
+ # The egress rules to apply to the default NACL in the VPC. This is the
+ # security group that is used by any subnet that doesn't have its own NACL
+ # attached. The value for this variable must be a map where the keys are a
+ # unique name for each rule and the values are objects with the same fields as
+ # the egress block in the aws_default_network_acl resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_network_acl.
default_nacl_egress_rules = {"AllowAll":{"action":"allow","cidr_block":"0.0.0.0/0","from_port":0,"protocol":"-1","rule_no":100,"to_port":0}}
- # The ingress rules to apply to the default NACL in the VPC. This is the NACL that
- # is used by any subnet that doesn't have its own NACL attached. The value for
- # this variable must be a map where the keys are a unique name for each rule and
- # the values are objects with the same fields as the ingress block in the
- # aws_default_network_acl resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_network_acl.
+ # The ingress rules to apply to the default NACL in the VPC. This is the NACL
+ # that is used by any subnet that doesn't have its own NACL attached. The
+ # value for this variable must be a map where the keys are a unique name for
+ # each rule and the values are objects with the same fields as the ingress
+ # block in the aws_default_network_acl resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_network_acl.
default_nacl_ingress_rules = {"AllowAll":{"action":"allow","cidr_block":"0.0.0.0/0","from_port":0,"protocol":"-1","rule_no":100,"to_port":0}}
- # The egress rules to apply to the default security group in the VPC. This is the
- # security group that is used by any resource that doesn't have its own security
- # group attached. The value for this variable must be a map where the keys are a
- # unique name for each rule and the values are objects with the same fields as the
- # egress block in the aws_default_security_group resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_security_group#egress-block.
+ # The egress rules to apply to the default security group in the VPC. This is
+ # the security group that is used by any resource that doesn't have its own
+ # security group attached. The value for this variable must be a map where the
+ # keys are a unique name for each rule and the values are objects with the
+ # same fields as the egress block in the aws_default_security_group resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_security_group#egress-block.
default_security_group_egress_rules = {"AllowAllOutbound":{"cidr_blocks":["0.0.0.0/0"],"from_port":0,"ipv6_cidr_blocks":["::/0"],"protocol":"-1","to_port":0}}
- # The ingress rules to apply to the default security group in the VPC. This is the
- # security group that is used by any resource that doesn't have its own security
- # group attached. The value for this variable must be a map where the keys are a
- # unique name for each rule and the values are objects with the same fields as the
- # ingress block in the aws_default_security_group resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_security_group#ingress-block.
+ # The ingress rules to apply to the default security group in the VPC. This is
+ # the security group that is used by any resource that doesn't have its own
+ # security group attached. The value for this variable must be a map where the
+ # keys are a unique name for each rule and the values are objects with the
+ # same fields as the ingress block in the aws_default_security_group resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_security_group#ingress-block.
default_security_group_ingress_rules = {"AllowAllFromSelf":{"from_port":0,"protocol":"-1","self":true,"to_port":0}}
# If set to false, the default security groups will NOT be created.
enable_default_security_group = false
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role.
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role.
iam_role_permissions_boundary = null
- # The ARN of a KMS key to use for encrypting VPC the flow log. A new KMS key will
- # be created if this is not supplied.
+  # The ARN of a KMS key to use for encrypting the VPC flow log. A new KMS key
+ # will be created if this is not supplied.
kms_key_arn = null
- # The number of days to retain this KMS Key (a Customer Master Key) after it has
- # been marked for deletion. Setting to null defaults to the provider default,
- # which is the maximum possible value (30 days).
+ # The number of days to retain this KMS Key (a Customer Master Key) after it
+ # has been marked for deletion. Setting to null defaults to the provider
+ # default, which is the maximum possible value (30 days).
kms_key_deletion_window_in_days = null
- # VPC Flow Logs will be encrypted with a KMS Key (a Customer Master Key). The IAM
- # Users specified in this list will have access to this key.
+ # VPC Flow Logs will be encrypted with a KMS Key (a Customer Master Key). The
+ # IAM Users specified in this list will have access to this key.
kms_key_user_iam_arns = null
- # A map of tags to apply to the NAT gateways, on top of the custom_tags. The key
- # is the tag name and the value is the tag value. Note that tags defined here will
- # override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the NAT gateways, on top of the custom_tags. The
+ # key is the tag name and the value is the tag value. Note that tags defined
+ # here will override tags defined as custom_tags in case of conflict.
nat_gateway_custom_tags = {}
- # How many AWS Availability Zones (AZs) to use. One subnet of each type (public,
- # private app) will be created in each AZ. Note that this must be less than or
- # equal to the total number of AZs in a region. A value of null means all AZs
- # should be used. For example, if you specify 3 in a region with 5 AZs, subnets
- # will be created in just 3 AZs instead of all 5. Defaults to 3.
+ # How many AWS Availability Zones (AZs) to use. One subnet of each type
+ # (public, private app) will be created in each AZ. Note that this must be
+ # less than or equal to the total number of AZs in a region. A value of null
+ # means all AZs should be used. For example, if you specify 3 in a region with
+ # 5 AZs, subnets will be created in just 3 AZs instead of all 5. Defaults to
+ # 3.
num_availability_zones = null
# Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
+ # ranges. MAKE SURE if you change this you also change the CIDR spacing or
+ # you may hit errors. See cidrsubnet interpolation in terraform config for
+ # more information.
private_subnet_bits = 4
- # A map listing the specific CIDR blocks desired for each private subnet. The key
- # must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of Availability
- # Zones. If left blank, we will compute a reasonable CIDR block for each subnet.
+ # A map listing the specific CIDR blocks desired for each private subnet. The
+ # key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
+ # Availability Zones. If left blank, we will compute a reasonable CIDR block
+ # for each subnet.
private_subnet_cidr_blocks = {}
- # A map of tags to apply to the private Subnet, on top of the custom_tags. The key
- # is the tag name and the value is the tag value. Note that tags defined here will
- # override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the private Subnet, on top of the custom_tags. The
+ # key is the tag name and the value is the tag value. Note that tags defined
+ # here will override tags defined as custom_tags in case of conflict.
private_subnet_custom_tags = {}
# Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
+ # ranges. MAKE SURE if you change this you also change the CIDR spacing or
+ # you may hit errors. See cidrsubnet interpolation in terraform config for
+ # more information.
public_subnet_bits = 4
- # A map listing the specific CIDR blocks desired for each public subnet. The key
- # must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of Availability
- # Zones. If left blank, we will compute a reasonable CIDR block for each subnet.
+ # A map listing the specific CIDR blocks desired for each public subnet. The
+ # key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
+ # Availability Zones. If left blank, we will compute a reasonable CIDR block
+ # for each subnet.
public_subnet_cidr_blocks = {}
- # A map of tags to apply to the public Subnet, on top of the custom_tags. The key
- # is the tag name and the value is the tag value. Note that tags defined here will
- # override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the public Subnet, on top of the custom_tags. The
+ # key is the tag name and the value is the tag value. Note that tags defined
+ # here will override tags defined as custom_tags in case of conflict.
public_subnet_custom_tags = {}
# The amount of spacing between the different subnet types
subnet_spacing = 8
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
}
@@ -1006,11 +1006,11 @@ Indicates whether or not the VPC has finished creating
diff --git a/docs/reference/services/networking/route-53-hosted-zones.md b/docs/reference/services/networking/route-53-hosted-zones.md
index 72b3ea4194..d4ff6681de 100644
--- a/docs/reference/services/networking/route-53-hosted-zones.md
+++ b/docs/reference/services/networking/route-53-hosted-zones.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Route 53 Hosted Zones
-View Source
+View Source
Release Notes
@@ -49,7 +49,7 @@ If you’ve never used the Service Catalog before, make sure to read
:::
-* [Should you use AWS Route 53 or CloudMap for your DNS entries?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/networking/route53/core-concepts.md#should-i-use-route53-or-cloud-map)
+* [Should you use AWS Route 53 or CloudMap for your DNS entries?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/networking/route53/core-concepts.md#should-i-use-route53-or-cloud-map)
* [AWS Cloud Map Documentation](https://docs.aws.amazon.com/cloud-map/latest/dg/what-is-cloud-map.html): Amazon’s docs
for AWS Cloud Map that cover core concepts and configuration.
* [Route 53 Documentation](https://docs.aws.amazon.com/route53/): Amazon’s docs for Route 53 that cover core concepts
@@ -61,7 +61,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -69,7 +69,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -88,7 +88,7 @@ If you want to deploy this repo in production, check out the following resources
module "route_53" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/route53?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/route53?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
@@ -98,16 +98,17 @@ module "route_53" {
# domain name. See examples below.
private_zones = {}
- # A map of public Route 53 Hosted Zones. In this map, the key should be the domain
- # name. See examples below.
+ # A map of public Route 53 Hosted Zones. In this map, the key should be the
+ # domain name. See examples below.
public_zones = {}
- # A map of domain names to configurations for setting up a new private namespace
- # in AWS Cloud Map.
+ # A map of domain names to configurations for setting up a new private
+ # namespace in AWS Cloud Map.
service_discovery_private_namespaces = {}
- # A map of domain names to configurations for setting up a new public namespace in
- # AWS Cloud Map. Note that the domain name must be registered with Route 53.
+ # A map of domain names to configurations for setting up a new public
+ # namespace in AWS Cloud Map. Note that the domain name must be registered
+ # with Route 53.
service_discovery_public_namespaces = {}
}
@@ -125,7 +126,7 @@ module "route_53" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/route53?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/route53?ref=v0.104.12"
}
inputs = {
@@ -138,16 +139,17 @@ inputs = {
# domain name. See examples below.
private_zones = {}
- # A map of public Route 53 Hosted Zones. In this map, the key should be the domain
- # name. See examples below.
+ # A map of public Route 53 Hosted Zones. In this map, the key should be the
+ # domain name. See examples below.
public_zones = {}
- # A map of domain names to configurations for setting up a new private namespace
- # in AWS Cloud Map.
+ # A map of domain names to configurations for setting up a new private
+ # namespace in AWS Cloud Map.
service_discovery_private_namespaces = {}
- # A map of domain names to configurations for setting up a new public namespace in
- # AWS Cloud Map. Note that the domain name must be registered with Route 53.
+ # A map of domain names to configurations for setting up a new public
+ # namespace in AWS Cloud Map. Note that the domain name must be registered
+ # with Route 53.
service_discovery_public_namespaces = {}
}
@@ -547,11 +549,11 @@ A map of domains to resource arns and hosted zones of the created Service Discov
diff --git a/docs/reference/services/networking/sns-topics.md b/docs/reference/services/networking/sns-topics.md
index 27daf26723..f729c7e6bf 100644
--- a/docs/reference/services/networking/sns-topics.md
+++ b/docs/reference/services/networking/sns-topics.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon Simple Notification Service
-View Source
+View Source
Release Notes
@@ -48,8 +48,8 @@ If you’ve never used the Service Catalog before, make sure to read
:::
* [SNS Documentation](https://docs.aws.amazon.com/sns/): Amazon’s docs for SNS that cover core concepts and configuration
-* [How do SNS topics work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/networking/sns-topics/core-concepts.md#how-do-sns-topics-work)
-* [How do I get notified when a message is published to an SNS Topic?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/networking/sns-topics/core-concepts.md#how-do-i-get-notified)
+* [How do SNS topics work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/networking/sns-topics/core-concepts.md#how-do-sns-topics-work)
+* [How do I get notified when a message is published to an SNS Topic?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/networking/sns-topics/core-concepts.md#how-do-i-get-notified)
## Deploy
@@ -57,7 +57,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -65,7 +65,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -84,7 +84,7 @@ If you want to deploy this repo in production, check out the following resources
module "sns_topics" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/sns-topics?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/sns-topics?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -97,34 +97,37 @@ module "sns_topics" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of IAM ARNs that will be given the rights to publish to the SNS topic.
+ # A list of IAM ARNs that will be given the rights to publish to the SNS
+ # topic.
allow_publish_accounts = []
# A list of AWS services that will be given the rights to publish to the SNS
# topic.
allow_publish_services = []
- # A list of IAM ARNs that will be given the rights to subscribe to the SNS topic.
+ # A list of IAM ARNs that will be given the rights to subscribe to the SNS
+ # topic.
allow_subscribe_accounts = []
# A list of protocols that can be used to subscribe to the SNS topic.
allow_subscribe_protocols = ["http","https","email","email-json","sms","sqs","application","lambda"]
# Set to false to have this module create no resources. This weird parameter
- # exists solely because Terraform does not support conditional modules. Therefore,
- # this is a hack to allow you to conditionally decide if the resources should be
- # created or not.
+ # exists solely because Terraform does not support conditional modules.
+ # Therefore, this is a hack to allow you to conditionally decide if the
+ # resources should be created or not.
create_resources = true
# The display name of the SNS topic
display_name = ""
- # The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom
- # CMK
+ # The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a
+ # custom CMK
kms_master_key_id = "alias/aws/sns"
- # The ARN of a Secrets Manager entry that contains the Slack Webhook URL (e.g.,
- # https://hooks.slack.com/services/FOO/BAR/BAZ) that SNS messages are sent to.
+ # The ARN of a Secrets Manager entry that contains the Slack Webhook URL
+ # (e.g., https://hooks.slack.com/services/FOO/BAR/BAZ) that SNS messages are
+ # sent to.
slack_webhook_url_secrets_manager_arn = null
}
@@ -142,7 +145,7 @@ module "sns_topics" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/sns-topics?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/sns-topics?ref=v0.104.12"
}
inputs = {
@@ -158,34 +161,37 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # A list of IAM ARNs that will be given the rights to publish to the SNS topic.
+ # A list of IAM ARNs that will be given the rights to publish to the SNS
+ # topic.
allow_publish_accounts = []
# A list of AWS services that will be given the rights to publish to the SNS
# topic.
allow_publish_services = []
- # A list of IAM ARNs that will be given the rights to subscribe to the SNS topic.
+ # A list of IAM ARNs that will be given the rights to subscribe to the SNS
+ # topic.
allow_subscribe_accounts = []
# A list of protocols that can be used to subscribe to the SNS topic.
allow_subscribe_protocols = ["http","https","email","email-json","sms","sqs","application","lambda"]
# Set to false to have this module create no resources. This weird parameter
- # exists solely because Terraform does not support conditional modules. Therefore,
- # this is a hack to allow you to conditionally decide if the resources should be
- # created or not.
+ # exists solely because Terraform does not support conditional modules.
+ # Therefore, this is a hack to allow you to conditionally decide if the
+ # resources should be created or not.
create_resources = true
# The display name of the SNS topic
display_name = ""
- # The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom
- # CMK
+ # The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a
+ # custom CMK
kms_master_key_id = "alias/aws/sns"
- # The ARN of a Secrets Manager entry that contains the Slack Webhook URL (e.g.,
- # https://hooks.slack.com/services/FOO/BAR/BAZ) that SNS messages are sent to.
+ # The ARN of a Secrets Manager entry that contains the Slack Webhook URL
+ # (e.g., https://hooks.slack.com/services/FOO/BAR/BAZ) that SNS messages are
+ # sent to.
slack_webhook_url_secrets_manager_arn = null
}
@@ -321,11 +327,11 @@ The ARN of the SNS topic.
diff --git a/docs/reference/services/networking/virtual-private-cloud-vpc.md b/docs/reference/services/networking/virtual-private-cloud-vpc.md
index 8a23ad6952..1257ae8b9b 100644
--- a/docs/reference/services/networking/virtual-private-cloud-vpc.md
+++ b/docs/reference/services/networking/virtual-private-cloud-vpc.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# VPC
-View Source
+View Source
Release Notes
@@ -65,9 +65,9 @@ documentation in the [terraform-aws-vpc](https://github.com/gruntwork-io/terrafo
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/test): Automated tests for the modules and examples.
## Deploy
@@ -75,7 +75,7 @@ documentation in the [terraform-aws-vpc](https://github.com/gruntwork-io/terrafo
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -83,7 +83,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -105,7 +105,7 @@ If you want to deploy this repo in production, check out the following resources
module "vpc" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/vpc?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/vpc?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -116,10 +116,10 @@ module "vpc" {
# '10.100.0.0/18', '10.200.0.0/18', etc.
cidr_block =
- # The number of NAT Gateways to launch for this VPC. For production VPCs, a NAT
- # Gateway should be placed in each Availability Zone (so likely 3 total), whereas
- # for non-prod VPCs, just one Availability Zone (and hence 1 NAT Gateway) will
- # suffice.
+ # The number of NAT Gateways to launch for this VPC. For production VPCs, a
+ # NAT Gateway should be placed in each Availability Zone (so likely 3 total),
+ # whereas for non-prod VPCs, just one Availability Zone (and hence 1 NAT
+ # Gateway) will suffice.
num_nat_gateways =
# Name of the VPC. Examples include 'prod', 'dev', 'mgmt', etc.
@@ -133,51 +133,51 @@ module "vpc" {
# internet?
allow_private_persistence_internet_access = false
- # If true, will apply the default NACL rules in var.default_nacl_ingress_rules and
- # var.default_nacl_egress_rules on the default NACL of the VPC. Note that every
- # VPC must have a default NACL - when this is false, the original default NACL
- # rules managed by AWS will be used.
+ # If true, will apply the default NACL rules in var.default_nacl_ingress_rules
+ # and var.default_nacl_egress_rules on the default NACL of the VPC. Note that
+ # every VPC must have a default NACL - when this is false, the original
+ # default NACL rules managed by AWS will be used.
apply_default_nacl_rules = false
- # If true, will associate the default NACL to the public, private, and persistence
- # subnets created by this module. Only used if var.apply_default_nacl_rules is
- # true. Note that this does not guarantee that the subnets are associated with the
- # default NACL. Subnets can only be associated with a single NACL. The default
- # NACL association will be dropped if the subnets are associated with a custom
- # NACL later.
+ # If true, will associate the default NACL to the public, private, and
+ # persistence subnets created by this module. Only used if
+ # var.apply_default_nacl_rules is true. Note that this does not guarantee that
+ # the subnets are associated with the default NACL. Subnets can only be
+ # associated with a single NACL. The default NACL association will be dropped
+ # if the subnets are associated with a custom NACL later.
associate_default_nacl_to_subnets = true
- # Specific Availability Zones in which subnets SHOULD NOT be created. Useful for
- # when features / support is missing from a given AZ.
+ # Specific Availability Zones in which subnets SHOULD NOT be created. Useful
+ # for when features / support is missing from a given AZ.
availability_zone_exclude_names = []
- # DEPRECATED. The AWS Region where this VPC will exist. This variable is no longer
- # used and only kept around for backwards compatibility. We now automatically
- # fetch the region using a data source.
+ # DEPRECATED. The AWS Region where this VPC will exist. This variable is no
+ # longer used and only kept around for backwards compatibility. We now
+ # automatically fetch the region using a data source.
aws_region = ""
# Whether or not to create DNS forwarders from the Mgmt VPC to the App VPC to
- # resolve private Route 53 endpoints. This is most useful when you want to keep
- # your EKS Kubernetes API endpoint private to the VPC, but want to access it from
- # the Mgmt VPC (where your VPN/Bastion servers are).
+ # resolve private Route 53 endpoints. This is most useful when you want to
+ # keep your EKS Kubernetes API endpoint private to the VPC, but want to access
+ # it from the Mgmt VPC (where your VPN/Bastion servers are).
create_dns_forwarder = false
# If you set this variable to false, this module will not create VPC Flow Logs
- # resources. This is used as a workaround because Terraform does not allow you to
- # use the 'count' parameter on modules. By using this parameter, you can
+ # resources. This is used as a workaround because Terraform does not allow you
+ # to use the 'count' parameter on modules. By using this parameter, you can
# optionally create or not create the resources within this module.
create_flow_logs = true
- # Whether the VPC will create an Internet Gateway. There are use cases when the
- # VPC is desired to not be routable from the internet, and hence, they should not
- # have an Internet Gateway. For example, when it is desired that public subnets
- # exist but they are not directly public facing, since they can be routed from
- # other VPC hosting the IGW.
+ # Whether the VPC will create an Internet Gateway. There are use cases when
+ # the VPC is desired to not be routable from the internet, and hence, they
+ # should not have an Internet Gateway. For example, when it is desired that
+ # public subnets exist but they are not directly public facing, since they can
+ # be routed from other VPC hosting the IGW.
create_igw = true
- # If set to false, this module will NOT create Network ACLs. This is useful if you
- # don't want to use Network ACLs or you want to provide your own Network ACLs
- # outside of this module.
+ # If set to false, this module will NOT create Network ACLs. This is useful if
+ # you don't want to use Network ACLs or you want to provide your own Network
+ # ACLs outside of this module.
create_network_acls = true
# Whether or not to create a peering connection to another VPC.
@@ -203,56 +203,53 @@ module "vpc" {
create_public_subnet_nacls = true
# If set to false, this module will NOT create the public subnet tier. This is
- # useful for VPCs that only need private subnets. Note that setting this to false
- # also means the module will NOT create an Internet Gateway or the NAT gateways,
- # so if you want any public Internet access in the VPC (even outbound access—e.g.,
- # to run apt get), you'll need to provide it yourself via some other mechanism
- # (e.g., via VPC peering, a Transit Gateway, Direct Connect, etc).
+ # useful for VPCs that only need private subnets. Note that setting this to
+ # false also means the module will NOT create an Internet Gateway or the NAT
+ # gateways, so if you want any public Internet access in the VPC (even
+ # outbound access—e.g., to run apt get), you'll need to provide it yourself
+ # via some other mechanism (e.g., via VPC peering, a Transit Gateway, Direct
+ # Connect, etc).
create_public_subnets = true
# Create VPC endpoints for S3 and DynamoDB.
create_vpc_endpoints = true
# A map of tags to apply to the VPC, Subnets, Route Tables, Internet Gateway,
- # default security group, and default NACLs. The key is the tag name and the value
- # is the tag value. Note that the tag 'Name' is automatically added by this module
- # but may be optionally overwritten by this variable.
+ # default security group, and default NACLs. The key is the tag name and the
+ # value is the tag value. Note that the tag 'Name' is automatically added by
+ # this module but may be optionally overwritten by this variable.
custom_tags = {}
- # The egress rules to apply to the default NACL in the VPC. This is the security
- # group that is used by any subnet that doesn't have its own NACL attached. The
- # value for this variable must be a map where the keys are a unique name for each
- # rule and the values are objects with the same fields as the egress block in the
- # aws_default_network_acl resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_network_acl.
+ # The egress rules to apply to the default NACL in the VPC. This is the
+ # security group that is used by any subnet that doesn't have its own NACL
+ # attached. The value for this variable must be a map where the keys are a
+ # unique name for each rule and the values are objects with the same fields as
+ # the egress block in the aws_default_network_acl resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_network_acl.
default_nacl_egress_rules = {"AllowAll":{"action":"allow","cidr_block":"0.0.0.0/0","from_port":0,"protocol":"-1","rule_no":100,"to_port":0}}
- # The ingress rules to apply to the default NACL in the VPC. This is the NACL that
- # is used by any subnet that doesn't have its own NACL attached. The value for
- # this variable must be a map where the keys are a unique name for each rule and
- # the values are objects with the same fields as the ingress block in the
- # aws_default_network_acl resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_network_acl.
+ # The ingress rules to apply to the default NACL in the VPC. This is the NACL
+ # that is used by any subnet that doesn't have its own NACL attached. The
+ # value for this variable must be a map where the keys are a unique name for
+ # each rule and the values are objects with the same fields as the ingress
+ # block in the aws_default_network_acl resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_network_acl.
default_nacl_ingress_rules = {"AllowAll":{"action":"allow","cidr_block":"0.0.0.0/0","from_port":0,"protocol":"-1","rule_no":100,"to_port":0}}
- # The egress rules to apply to the default security group in the VPC. This is the
- # security group that is used by any resource that doesn't have its own security
- # group attached. The value for this variable must be a map where the keys are a
- # unique name for each rule and the values are objects with the same fields as the
- # egress block in the aws_default_security_group resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_security_group#egress-block.
+ # The egress rules to apply to the default security group in the VPC. This is
+ # the security group that is used by any resource that doesn't have its own
+ # security group attached. The value for this variable must be a map where the
+ # keys are a unique name for each rule and the values are objects with the
+ # same fields as the egress block in the aws_default_security_group resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_security_group#egress-block.
default_security_group_egress_rules = {"AllowAllOutbound":{"cidr_blocks":["0.0.0.0/0"],"from_port":0,"ipv6_cidr_blocks":["::/0"],"protocol":"-1","to_port":0}}
- # The ingress rules to apply to the default security group in the VPC. This is the
- # security group that is used by any resource that doesn't have its own security
- # group attached. The value for this variable must be a map where the keys are a
- # unique name for each rule and the values are objects with the same fields as the
- # ingress block in the aws_default_security_group resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_security_group#ingress-block.
+ # The ingress rules to apply to the default security group in the VPC. This is
+ # the security group that is used by any resource that doesn't have its own
+ # security group attached. The value for this variable must be a map where the
+ # keys are a unique name for each rule and the values are objects with the
+ # same fields as the ingress block in the aws_default_security_group resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_security_group#ingress-block.
default_security_group_ingress_rules = {"AllowAllFromSelf":{"from_port":0,"protocol":"-1","self":true,"to_port":0}}
# Name to set for the destination VPC resolver (inbound from origin VPC to
@@ -267,15 +264,17 @@ module "vpc" {
# If set to false, the default security groups will NOT be created.
enable_default_security_group = true
- # Additional IAM policies to apply to the S3 bucket to store flow logs. You can
- # use this to grant read/write access beyond what is provided to the VPC. This
- # should be a map, where each key is a unique statement ID (SID), and each value
- # is an object that contains the parameters defined in the comment below.
+ # Additional IAM policies to apply to the S3 bucket to store flow logs. You
+ # can use this to grant read/write access beyond what is provided to the VPC.
+ # This should be a map, where each key is a unique statement ID (SID), and
+ # each value is an object that contains the parameters defined in the comment
+ # below.
flow_log_additional_s3_bucket_policy_statements = null
- # The name to use for the flow log IAM role. This can be useful if you provision
- # the VPC without admin privileges which needs setting IAM:PassRole on deployment
- # role. When null, a default name based on the VPC name will be chosen.
+ # The name to use for the flow log IAM role. This can be useful if you
+ # provision the VPC without admin privileges which needs setting IAM:PassRole
+ # on deployment role. When null, a default name based on the VPC name will be
+ # chosen.
flow_log_cloudwatch_iam_role_name = null
# The name to use for the CloudWatch Log group used for storing flow log. When
@@ -293,57 +292,59 @@ module "vpc" {
# The name to use for the VPC flow logs S3 bucket.
flow_log_s3_bucket_name = null
- # For s3 log destinations, the number of days after which to expire (permanently
- # delete) flow logs. Defaults to 365.
+ # For s3 log destinations, the number of days after which to expire
+ # (permanently delete) flow logs. Defaults to 365.
flow_log_s3_expiration_transition = 365
- # For s3 log destinations, the number of days after which to transition the flow
- # log objects to glacier. Defaults to 180.
+ # For s3 log destinations, the number of days after which to transition the
+ # flow log objects to glacier. Defaults to 180.
flow_log_s3_glacier_transition = 180
- # For s3 log destinations, the number of days after which to transition the flow
- # log objects to infrequent access. Defaults to 30.
+ # For s3 log destinations, the number of days after which to transition the
+ # flow log objects to infrequent access. Defaults to 30.
flow_log_s3_infrequent_access_transition = 30
# if log_destination_type is s3, optionally specify a subfolder for flow log
# delivery.
flow_log_s3_subfolder = ""
- # The type of traffic to capture in the VPC flow log. Valid values include ACCEPT,
- # REJECT, or ALL. Defaults to REJECT. Only used if create_flow_logs is true.
+ # The type of traffic to capture in the VPC flow log. Valid values include
+ # ACCEPT, REJECT, or ALL. Defaults to REJECT. Only used if create_flow_logs is
+ # true.
flow_logs_traffic_type = "REJECT"
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role.
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role.
iam_role_permissions_boundary = null
- # The ARN of a KMS key to use for encrypting VPC the flow log. A new KMS key will
- # be created if this is not supplied.
+ # The ARN of a KMS key to use for encrypting VPC the flow log. A new KMS key
+ # will be created if this is not supplied.
kms_key_arn = null
- # The number of days to retain this KMS Key (a Customer Master Key) after it has
- # been marked for deletion. Setting to null defaults to the provider default,
- # which is the maximum possible value (30 days).
+ # The number of days to retain this KMS Key (a Customer Master Key) after it
+ # has been marked for deletion. Setting to null defaults to the provider
+ # default, which is the maximum possible value (30 days).
kms_key_deletion_window_in_days = null
- # VPC Flow Logs will be encrypted with a KMS Key (a Customer Master Key). The IAM
- # Users specified in this list will have access to this key.
+ # VPC Flow Logs will be encrypted with a KMS Key (a Customer Master Key). The
+ # IAM Users specified in this list will have access to this key.
kms_key_user_iam_arns = null
- # Specify true to indicate that instances launched into the public subnet should
- # be assigned a public IP address (versus a private IP address)
+ # Specify true to indicate that instances launched into the public subnet
+ # should be assigned a public IP address (versus a private IP address)
map_public_ip_on_launch = false
- # A map of tags to apply to the NAT gateways, on top of the custom_tags. The key
- # is the tag name and the value is the tag value. Note that tags defined here will
- # override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the NAT gateways, on top of the custom_tags. The
+ # key is the tag name and the value is the tag value. Note that tags defined
+ # here will override tags defined as custom_tags in case of conflict.
nat_gateway_custom_tags = {}
- # How many AWS Availability Zones (AZs) to use. One subnet of each type (public,
- # private app) will be created in each AZ. Note that this must be less than or
- # equal to the total number of AZs in a region. A value of null means all AZs
- # should be used. For example, if you specify 3 in a region with 5 AZs, subnets
- # will be created in just 3 AZs instead of all 5. Defaults to all AZs in a region.
+ # How many AWS Availability Zones (AZs) to use. One subnet of each type
+ # (public, private app) will be created in each AZ. Note that this must be
+ # less than or equal to the total number of AZs in a region. A value of null
+ # means all AZs should be used. For example, if you specify 3 in a region with
+ # 5 AZs, subnets will be created in just 3 AZs instead of all 5. Defaults to
+ # all AZs in a region.
num_availability_zones = null
# The CIDR block of the origin VPC.
@@ -362,120 +363,127 @@ module "vpc" {
# forwarder is addressable publicly, access is blocked by security groups.
origin_vpc_public_subnet_ids = null
- # Name to set for the origin VPC resolver (outbound from origin VPC to destination
- # VPC). If null (default), defaults to
+ # Name to set for the origin VPC resolver (outbound from origin VPC to
+ # destination VPC). If null (default), defaults to
# 'ORIGIN_VPC_NAME-to-DESTINATION_VPC_NAME-out'.
origin_vpc_resolver_name = null
- # A list of route tables from the origin VPC that should have routes to this app
- # VPC.
+ # A list of route tables from the origin VPC that should have routes to this
+ # app VPC.
origin_vpc_route_table_ids = []
# A list of Virtual Private Gateways that will propagate routes to persistence
# subnets. All routes from VPN connections that use Virtual Private Gateways
- # listed here will appear in route tables of persistence subnets. If left empty,
- # no routes will be propagated.
+ # listed here will appear in route tables of persistence subnets. If left
+ # empty, no routes will be propagated.
persistence_propagating_vgws = []
# Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
+ # ranges. MAKE SURE if you change this you also change the CIDR spacing or
+ # you may hit errors. See cidrsubnet interpolation in terraform config for
+ # more information.
persistence_subnet_bits = 5
- # The amount of spacing between the private persistence subnets. Default: 2 times
- # the value of private_subnet_spacing.
+ # The amount of spacing between the private persistence subnets. Default: 2
+ # times the value of private_subnet_spacing.
persistence_subnet_spacing = null
- # A map of unique names to client IP CIDR block and inbound ports that should be
- # exposed in the private app subnet tier nACLs. This is useful when exposing your
- # service on a privileged port with an NLB, where the address isn't translated.
+ # A map of unique names to client IP CIDR block and inbound ports that should
+ # be exposed in the private app subnet tier nACLs. This is useful when
+ # exposing your service on a privileged port with an NLB, where the address
+ # isn't translated.
private_app_allow_inbound_ports_from_cidr = {}
# A map of unique names to destination IP CIDR block and outbound ports that
# should be allowed in the private app subnet tier nACLs. This is useful when
- # allowing your VPC specific outbound communication to defined CIDR blocks(known
- # networks)
+ # allowing your VPC specific outbound communication to defined CIDR
+ # blocks(known networks)
private_app_allow_outbound_ports_to_destination_cidr = {}
# A map of tags to apply to the private-app route table(s), on top of the
- # custom_tags. The key is the tag name and the value is the tag value. Note that
- # tags defined here will override tags defined as custom_tags in case of conflict.
+ # custom_tags. The key is the tag name and the value is the tag value. Note
+ # that tags defined here will override tags defined as custom_tags in case of
+ # conflict.
private_app_route_table_custom_tags = {}
- # A map listing the specific CIDR blocks desired for each private-app subnet. The
- # key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
- # Availability Zones. If left blank, we will compute a reasonable CIDR block for
- # each subnet.
+ # A map listing the specific CIDR blocks desired for each private-app subnet.
+ # The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
+ # Availability Zones. If left blank, we will compute a reasonable CIDR block
+ # for each subnet.
private_app_subnet_cidr_blocks = {}
- # A map of tags to apply to the private-app Subnet, on top of the custom_tags. The
- # key is the tag name and the value is the tag value. Note that tags defined here
- # will override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the private-app Subnet, on top of the custom_tags.
+ # The key is the tag name and the value is the tag value. Note that tags
+ # defined here will override tags defined as custom_tags in case of conflict.
private_app_subnet_custom_tags = {}
- # A map of tags to apply to the private-persistence route tables(s), on top of the
- # custom_tags. The key is the tag name and the value is the tag value. Note that
- # tags defined here will override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the private-persistence route tables(s), on top of
+ # the custom_tags. The key is the tag name and the value is the tag value.
+ # Note that tags defined here will override tags defined as custom_tags in
+ # case of conflict.
private_persistence_route_table_custom_tags = {}
# A map listing the specific CIDR blocks desired for each private-persistence
- # subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number
- # of Availability Zones. If left blank, we will compute a reasonable CIDR block
- # for each subnet.
+ # subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the
+ # number of Availability Zones. If left blank, we will compute a reasonable
+ # CIDR block for each subnet.
private_persistence_subnet_cidr_blocks = {}
# A map of tags to apply to the private-persistence Subnet, on top of the
- # custom_tags. The key is the tag name and the value is the tag value. Note that
- # tags defined here will override tags defined as custom_tags in case of conflict.
+ # custom_tags. The key is the tag name and the value is the tag value. Note
+ # that tags defined here will override tags defined as custom_tags in case of
+ # conflict.
private_persistence_subnet_custom_tags = {}
# A list of Virtual Private Gateways that will propagate routes to private
# subnets. All routes from VPN connections that use Virtual Private Gateways
- # listed here will appear in route tables of private subnets. If left empty, no
- # routes will be propagated.
+ # listed here will appear in route tables of private subnets. If left empty,
+ # no routes will be propagated.
private_propagating_vgws = []
# Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
+ # ranges. MAKE SURE if you change this you also change the CIDR spacing or
+ # you may hit errors. See cidrsubnet interpolation in terraform config for
+ # more information.
private_subnet_bits = 5
- # The amount of spacing between private app subnets. Defaults to subnet_spacing in
- # vpc-app module if not set.
+ # The amount of spacing between private app subnets. Defaults to
+ # subnet_spacing in vpc-app module if not set.
private_subnet_spacing = null
- # A list of Virtual Private Gateways that will propagate routes to public subnets.
- # All routes from VPN connections that use Virtual Private Gateways listed here
- # will appear in route tables of public subnets. If left empty, no routes will be
- # propagated.
+ # A list of Virtual Private Gateways that will propagate routes to public
+ # subnets. All routes from VPN connections that use Virtual Private Gateways
+ # listed here will appear in route tables of public subnets. If left empty, no
+ # routes will be propagated.
public_propagating_vgws = []
- # A map of tags to apply to the public route table(s), on top of the custom_tags.
- # The key is the tag name and the value is the tag value. Note that tags defined
- # here will override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the public route table(s), on top of the
+ # custom_tags. The key is the tag name and the value is the tag value. Note
+ # that tags defined here will override tags defined as custom_tags in case of
+ # conflict.
public_route_table_custom_tags = {}
# Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
+ # ranges. MAKE SURE if you change this you also change the CIDR spacing or
+ # you may hit errors. See cidrsubnet interpolation in terraform config for
+ # more information.
public_subnet_bits = 5
- # A map listing the specific CIDR blocks desired for each public subnet. The key
- # must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of Availability
- # Zones. If left blank, we will compute a reasonable CIDR block for each subnet.
+ # A map listing the specific CIDR blocks desired for each public subnet. The
+ # key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
+ # Availability Zones. If left blank, we will compute a reasonable CIDR block
+ # for each subnet.
public_subnet_cidr_blocks = {}
- # A map of tags to apply to the public Subnet, on top of the custom_tags. The key
- # is the tag name and the value is the tag value. Note that tags defined here will
- # override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the public Subnet, on top of the custom_tags. The
+ # key is the tag name and the value is the tag value. Note that tags defined
+ # here will override tags defined as custom_tags in case of conflict.
public_subnet_custom_tags = {}
- # A map of tags to apply to the default Security Group, on top of the custom_tags.
- # The key is the tag name and the value is the tag value. Note that tags defined
- # here will override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the default Security Group, on top of the
+ # custom_tags. The key is the tag name and the value is the tag value. Note
+ # that tags defined here will override tags defined as custom_tags in case of
+ # conflict.
security_group_tags = {}
# The amount of spacing between the different subnet types
@@ -485,20 +493,21 @@ module "vpc" {
# with certain features, like deploying ALBs.
tag_for_use_with_eks = false
- # The allowed tenancy of instances launched into the selected VPC. Must be one of:
- # default, dedicated, or host.
+ # The allowed tenancy of instances launched into the selected VPC. Must be one
+ # of: default, dedicated, or host.
tenancy = "default"
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# A map of tags to apply just to the VPC itself, but not any of the other
# resources. The key is the tag name and the value is the tag value. Note that
- # tags defined here will override tags defined as custom_tags in case of conflict.
+ # tags defined here will override tags defined as custom_tags in case of
+ # conflict.
vpc_custom_tags = {}
}
@@ -516,7 +525,7 @@ module "vpc" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/vpc?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/vpc?ref=v0.104.12"
}
inputs = {
@@ -530,10 +539,10 @@ inputs = {
# '10.100.0.0/18', '10.200.0.0/18', etc.
cidr_block =
- # The number of NAT Gateways to launch for this VPC. For production VPCs, a NAT
- # Gateway should be placed in each Availability Zone (so likely 3 total), whereas
- # for non-prod VPCs, just one Availability Zone (and hence 1 NAT Gateway) will
- # suffice.
+ # The number of NAT Gateways to launch for this VPC. For production VPCs, a
+ # NAT Gateway should be placed in each Availability Zone (so likely 3 total),
+ # whereas for non-prod VPCs, just one Availability Zone (and hence 1 NAT
+ # Gateway) will suffice.
num_nat_gateways =
# Name of the VPC. Examples include 'prod', 'dev', 'mgmt', etc.
@@ -547,51 +556,51 @@ inputs = {
# internet?
allow_private_persistence_internet_access = false
- # If true, will apply the default NACL rules in var.default_nacl_ingress_rules and
- # var.default_nacl_egress_rules on the default NACL of the VPC. Note that every
- # VPC must have a default NACL - when this is false, the original default NACL
- # rules managed by AWS will be used.
+ # If true, will apply the default NACL rules in var.default_nacl_ingress_rules
+ # and var.default_nacl_egress_rules on the default NACL of the VPC. Note that
+ # every VPC must have a default NACL - when this is false, the original
+ # default NACL rules managed by AWS will be used.
apply_default_nacl_rules = false
- # If true, will associate the default NACL to the public, private, and persistence
- # subnets created by this module. Only used if var.apply_default_nacl_rules is
- # true. Note that this does not guarantee that the subnets are associated with the
- # default NACL. Subnets can only be associated with a single NACL. The default
- # NACL association will be dropped if the subnets are associated with a custom
- # NACL later.
+ # If true, will associate the default NACL to the public, private, and
+ # persistence subnets created by this module. Only used if
+ # var.apply_default_nacl_rules is true. Note that this does not guarantee that
+ # the subnets are associated with the default NACL. Subnets can only be
+ # associated with a single NACL. The default NACL association will be dropped
+ # if the subnets are associated with a custom NACL later.
associate_default_nacl_to_subnets = true
- # Specific Availability Zones in which subnets SHOULD NOT be created. Useful for
- # when features / support is missing from a given AZ.
+ # Specific Availability Zones in which subnets SHOULD NOT be created. Useful
+ # for when features / support is missing from a given AZ.
availability_zone_exclude_names = []
- # DEPRECATED. The AWS Region where this VPC will exist. This variable is no longer
- # used and only kept around for backwards compatibility. We now automatically
- # fetch the region using a data source.
+ # DEPRECATED. The AWS Region where this VPC will exist. This variable is no
+ # longer used and only kept around for backwards compatibility. We now
+ # automatically fetch the region using a data source.
aws_region = ""
# Whether or not to create DNS forwarders from the Mgmt VPC to the App VPC to
- # resolve private Route 53 endpoints. This is most useful when you want to keep
- # your EKS Kubernetes API endpoint private to the VPC, but want to access it from
- # the Mgmt VPC (where your VPN/Bastion servers are).
+ # resolve private Route 53 endpoints. This is most useful when you want to
+ # keep your EKS Kubernetes API endpoint private to the VPC, but want to access
+ # it from the Mgmt VPC (where your VPN/Bastion servers are).
create_dns_forwarder = false
# If you set this variable to false, this module will not create VPC Flow Logs
- # resources. This is used as a workaround because Terraform does not allow you to
- # use the 'count' parameter on modules. By using this parameter, you can
+ # resources. This is used as a workaround because Terraform does not allow you
+ # to use the 'count' parameter on modules. By using this parameter, you can
# optionally create or not create the resources within this module.
create_flow_logs = true
- # Whether the VPC will create an Internet Gateway. There are use cases when the
- # VPC is desired to not be routable from the internet, and hence, they should not
- # have an Internet Gateway. For example, when it is desired that public subnets
- # exist but they are not directly public facing, since they can be routed from
- # other VPC hosting the IGW.
+ # Whether the VPC will create an Internet Gateway. There are use cases when
+ # the VPC is desired to not be routable from the internet, and hence, they
+ # should not have an Internet Gateway. For example, when it is desired that
+ # public subnets exist but they are not directly public facing, since they can
+ # be routed from other VPC hosting the IGW.
create_igw = true
- # If set to false, this module will NOT create Network ACLs. This is useful if you
- # don't want to use Network ACLs or you want to provide your own Network ACLs
- # outside of this module.
+ # If set to false, this module will NOT create Network ACLs. This is useful if
+ # you don't want to use Network ACLs or you want to provide your own Network
+ # ACLs outside of this module.
create_network_acls = true
# Whether or not to create a peering connection to another VPC.
@@ -617,56 +626,53 @@ inputs = {
create_public_subnet_nacls = true
# If set to false, this module will NOT create the public subnet tier. This is
- # useful for VPCs that only need private subnets. Note that setting this to false
- # also means the module will NOT create an Internet Gateway or the NAT gateways,
- # so if you want any public Internet access in the VPC (even outbound access—e.g.,
- # to run apt get), you'll need to provide it yourself via some other mechanism
- # (e.g., via VPC peering, a Transit Gateway, Direct Connect, etc).
+ # useful for VPCs that only need private subnets. Note that setting this to
+ # false also means the module will NOT create an Internet Gateway or the NAT
+ # gateways, so if you want any public Internet access in the VPC (even
+ # outbound access—e.g., to run apt get), you'll need to provide it yourself
+ # via some other mechanism (e.g., via VPC peering, a Transit Gateway, Direct
+ # Connect, etc).
create_public_subnets = true
# Create VPC endpoints for S3 and DynamoDB.
create_vpc_endpoints = true
# A map of tags to apply to the VPC, Subnets, Route Tables, Internet Gateway,
- # default security group, and default NACLs. The key is the tag name and the value
- # is the tag value. Note that the tag 'Name' is automatically added by this module
- # but may be optionally overwritten by this variable.
+ # default security group, and default NACLs. The key is the tag name and the
+ # value is the tag value. Note that the tag 'Name' is automatically added by
+ # this module but may be optionally overwritten by this variable.
custom_tags = {}
- # The egress rules to apply to the default NACL in the VPC. This is the security
- # group that is used by any subnet that doesn't have its own NACL attached. The
- # value for this variable must be a map where the keys are a unique name for each
- # rule and the values are objects with the same fields as the egress block in the
- # aws_default_network_acl resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_network_acl.
+ # The egress rules to apply to the default NACL in the VPC. This is the
+ # security group that is used by any subnet that doesn't have its own NACL
+ # attached. The value for this variable must be a map where the keys are a
+ # unique name for each rule and the values are objects with the same fields as
+ # the egress block in the aws_default_network_acl resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_network_acl.
default_nacl_egress_rules = {"AllowAll":{"action":"allow","cidr_block":"0.0.0.0/0","from_port":0,"protocol":"-1","rule_no":100,"to_port":0}}
- # The ingress rules to apply to the default NACL in the VPC. This is the NACL that
- # is used by any subnet that doesn't have its own NACL attached. The value for
- # this variable must be a map where the keys are a unique name for each rule and
- # the values are objects with the same fields as the ingress block in the
- # aws_default_network_acl resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_network_acl.
+ # The ingress rules to apply to the default NACL in the VPC. This is the NACL
+ # that is used by any subnet that doesn't have its own NACL attached. The
+ # value for this variable must be a map where the keys are a unique name for
+ # each rule and the values are objects with the same fields as the ingress
+ # block in the aws_default_network_acl resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_network_acl.
default_nacl_ingress_rules = {"AllowAll":{"action":"allow","cidr_block":"0.0.0.0/0","from_port":0,"protocol":"-1","rule_no":100,"to_port":0}}
- # The egress rules to apply to the default security group in the VPC. This is the
- # security group that is used by any resource that doesn't have its own security
- # group attached. The value for this variable must be a map where the keys are a
- # unique name for each rule and the values are objects with the same fields as the
- # egress block in the aws_default_security_group resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_security_group#egress-block.
+ # The egress rules to apply to the default security group in the VPC. This is
+ # the security group that is used by any resource that doesn't have its own
+ # security group attached. The value for this variable must be a map where the
+ # keys are a unique name for each rule and the values are objects with the
+ # same fields as the egress block in the aws_default_security_group resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_security_group#egress-block.
default_security_group_egress_rules = {"AllowAllOutbound":{"cidr_blocks":["0.0.0.0/0"],"from_port":0,"ipv6_cidr_blocks":["::/0"],"protocol":"-1","to_port":0}}
- # The ingress rules to apply to the default security group in the VPC. This is the
- # security group that is used by any resource that doesn't have its own security
- # group attached. The value for this variable must be a map where the keys are a
- # unique name for each rule and the values are objects with the same fields as the
- # ingress block in the aws_default_security_group resource:
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/defa
- # lt_security_group#ingress-block.
+ # The ingress rules to apply to the default security group in the VPC. This is
+ # the security group that is used by any resource that doesn't have its own
+ # security group attached. The value for this variable must be a map where the
+ # keys are a unique name for each rule and the values are objects with the
+ # same fields as the ingress block in the aws_default_security_group resource:
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_security_group#ingress-block.
default_security_group_ingress_rules = {"AllowAllFromSelf":{"from_port":0,"protocol":"-1","self":true,"to_port":0}}
# Name to set for the destination VPC resolver (inbound from origin VPC to
@@ -681,15 +687,17 @@ inputs = {
# If set to false, the default security groups will NOT be created.
enable_default_security_group = true
- # Additional IAM policies to apply to the S3 bucket to store flow logs. You can
- # use this to grant read/write access beyond what is provided to the VPC. This
- # should be a map, where each key is a unique statement ID (SID), and each value
- # is an object that contains the parameters defined in the comment below.
+ # Additional IAM policies to apply to the S3 bucket to store flow logs. You
+ # can use this to grant read/write access beyond what is provided to the VPC.
+ # This should be a map, where each key is a unique statement ID (SID), and
+ # each value is an object that contains the parameters defined in the comment
+ # below.
flow_log_additional_s3_bucket_policy_statements = null
- # The name to use for the flow log IAM role. This can be useful if you provision
- # the VPC without admin privileges which needs setting IAM:PassRole on deployment
- # role. When null, a default name based on the VPC name will be chosen.
+ # The name to use for the flow log IAM role. This can be useful if you
+ # provision the VPC without admin privileges which needs setting IAM:PassRole
+ # on deployment role. When null, a default name based on the VPC name will be
+ # chosen.
flow_log_cloudwatch_iam_role_name = null
# The name to use for the CloudWatch Log group used for storing flow log. When
@@ -707,57 +715,59 @@ inputs = {
# The name to use for the VPC flow logs S3 bucket.
flow_log_s3_bucket_name = null
- # For s3 log destinations, the number of days after which to expire (permanently
- # delete) flow logs. Defaults to 365.
+ # For s3 log destinations, the number of days after which to expire
+ # (permanently delete) flow logs. Defaults to 365.
flow_log_s3_expiration_transition = 365
- # For s3 log destinations, the number of days after which to transition the flow
- # log objects to glacier. Defaults to 180.
+ # For s3 log destinations, the number of days after which to transition the
+ # flow log objects to glacier. Defaults to 180.
flow_log_s3_glacier_transition = 180
- # For s3 log destinations, the number of days after which to transition the flow
- # log objects to infrequent access. Defaults to 30.
+ # For s3 log destinations, the number of days after which to transition the
+ # flow log objects to infrequent access. Defaults to 30.
flow_log_s3_infrequent_access_transition = 30
# if log_destination_type is s3, optionally specify a subfolder for flow log
# delivery.
flow_log_s3_subfolder = ""
- # The type of traffic to capture in the VPC flow log. Valid values include ACCEPT,
- # REJECT, or ALL. Defaults to REJECT. Only used if create_flow_logs is true.
+ # The type of traffic to capture in the VPC flow log. Valid values include
+ # ACCEPT, REJECT, or ALL. Defaults to REJECT. Only used if create_flow_logs is
+ # true.
flow_logs_traffic_type = "REJECT"
- # The ARN of the policy that is used to set the permissions boundary for the IAM
- # role.
+ # The ARN of the policy that is used to set the permissions boundary for the
+ # IAM role.
iam_role_permissions_boundary = null
- # The ARN of a KMS key to use for encrypting VPC the flow log. A new KMS key will
- # be created if this is not supplied.
+ # The ARN of a KMS key to use for encrypting VPC the flow log. A new KMS key
+ # will be created if this is not supplied.
kms_key_arn = null
- # The number of days to retain this KMS Key (a Customer Master Key) after it has
- # been marked for deletion. Setting to null defaults to the provider default,
- # which is the maximum possible value (30 days).
+ # The number of days to retain this KMS Key (a Customer Master Key) after it
+ # has been marked for deletion. Setting to null defaults to the provider
+ # default, which is the maximum possible value (30 days).
kms_key_deletion_window_in_days = null
- # VPC Flow Logs will be encrypted with a KMS Key (a Customer Master Key). The IAM
- # Users specified in this list will have access to this key.
+ # VPC Flow Logs will be encrypted with a KMS Key (a Customer Master Key). The
+ # IAM Users specified in this list will have access to this key.
kms_key_user_iam_arns = null
- # Specify true to indicate that instances launched into the public subnet should
- # be assigned a public IP address (versus a private IP address)
+ # Specify true to indicate that instances launched into the public subnet
+ # should be assigned a public IP address (versus a private IP address)
map_public_ip_on_launch = false
- # A map of tags to apply to the NAT gateways, on top of the custom_tags. The key
- # is the tag name and the value is the tag value. Note that tags defined here will
- # override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the NAT gateways, on top of the custom_tags. The
+ # key is the tag name and the value is the tag value. Note that tags defined
+ # here will override tags defined as custom_tags in case of conflict.
nat_gateway_custom_tags = {}
- # How many AWS Availability Zones (AZs) to use. One subnet of each type (public,
- # private app) will be created in each AZ. Note that this must be less than or
- # equal to the total number of AZs in a region. A value of null means all AZs
- # should be used. For example, if you specify 3 in a region with 5 AZs, subnets
- # will be created in just 3 AZs instead of all 5. Defaults to all AZs in a region.
+ # How many AWS Availability Zones (AZs) to use. One subnet of each type
+ # (public, private app) will be created in each AZ. Note that this must be
+ # less than or equal to the total number of AZs in a region. A value of null
+ # means all AZs should be used. For example, if you specify 3 in a region with
+ # 5 AZs, subnets will be created in just 3 AZs instead of all 5. Defaults to
+ # all AZs in a region.
num_availability_zones = null
# The CIDR block of the origin VPC.
@@ -776,120 +786,127 @@ inputs = {
# forwarder is addressable publicly, access is blocked by security groups.
origin_vpc_public_subnet_ids = null
- # Name to set for the origin VPC resolver (outbound from origin VPC to destination
- # VPC). If null (default), defaults to
+ # Name to set for the origin VPC resolver (outbound from origin VPC to
+ # destination VPC). If null (default), defaults to
# 'ORIGIN_VPC_NAME-to-DESTINATION_VPC_NAME-out'.
origin_vpc_resolver_name = null
- # A list of route tables from the origin VPC that should have routes to this app
- # VPC.
+ # A list of route tables from the origin VPC that should have routes to this
+ # app VPC.
origin_vpc_route_table_ids = []
# A list of Virtual Private Gateways that will propagate routes to persistence
# subnets. All routes from VPN connections that use Virtual Private Gateways
- # listed here will appear in route tables of persistence subnets. If left empty,
- # no routes will be propagated.
+ # listed here will appear in route tables of persistence subnets. If left
+ # empty, no routes will be propagated.
persistence_propagating_vgws = []
# Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
+ # ranges. MAKE SURE if you change this you also change the CIDR spacing or
+ # you may hit errors. See cidrsubnet interpolation in terraform config for
+ # more information.
persistence_subnet_bits = 5
- # The amount of spacing between the private persistence subnets. Default: 2 times
- # the value of private_subnet_spacing.
+ # The amount of spacing between the private persistence subnets. Default: 2
+ # times the value of private_subnet_spacing.
persistence_subnet_spacing = null
- # A map of unique names to client IP CIDR block and inbound ports that should be
- # exposed in the private app subnet tier nACLs. This is useful when exposing your
- # service on a privileged port with an NLB, where the address isn't translated.
+ # A map of unique names to client IP CIDR block and inbound ports that should
+ # be exposed in the private app subnet tier nACLs. This is useful when
+ # exposing your service on a privileged port with an NLB, where the address
+ # isn't translated.
private_app_allow_inbound_ports_from_cidr = {}
# A map of unique names to destination IP CIDR block and outbound ports that
# should be allowed in the private app subnet tier nACLs. This is useful when
- # allowing your VPC specific outbound communication to defined CIDR blocks(known
- # networks)
+ # allowing your VPC specific outbound communication to defined CIDR
+ # blocks (known networks)
private_app_allow_outbound_ports_to_destination_cidr = {}
# A map of tags to apply to the private-app route table(s), on top of the
- # custom_tags. The key is the tag name and the value is the tag value. Note that
- # tags defined here will override tags defined as custom_tags in case of conflict.
+ # custom_tags. The key is the tag name and the value is the tag value. Note
+ # that tags defined here will override tags defined as custom_tags in case of
+ # conflict.
private_app_route_table_custom_tags = {}
- # A map listing the specific CIDR blocks desired for each private-app subnet. The
- # key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
- # Availability Zones. If left blank, we will compute a reasonable CIDR block for
- # each subnet.
+ # A map listing the specific CIDR blocks desired for each private-app subnet.
+ # The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
+ # Availability Zones. If left blank, we will compute a reasonable CIDR block
+ # for each subnet.
private_app_subnet_cidr_blocks = {}
- # A map of tags to apply to the private-app Subnet, on top of the custom_tags. The
- # key is the tag name and the value is the tag value. Note that tags defined here
- # will override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the private-app Subnet, on top of the custom_tags.
+ # The key is the tag name and the value is the tag value. Note that tags
+ # defined here will override tags defined as custom_tags in case of conflict.
private_app_subnet_custom_tags = {}
- # A map of tags to apply to the private-persistence route tables(s), on top of the
- # custom_tags. The key is the tag name and the value is the tag value. Note that
- # tags defined here will override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the private-persistence route tables(s), on top of
+ # the custom_tags. The key is the tag name and the value is the tag value.
+ # Note that tags defined here will override tags defined as custom_tags in
+ # case of conflict.
private_persistence_route_table_custom_tags = {}
# A map listing the specific CIDR blocks desired for each private-persistence
- # subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number
- # of Availability Zones. If left blank, we will compute a reasonable CIDR block
- # for each subnet.
+ # subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the
+ # number of Availability Zones. If left blank, we will compute a reasonable
+ # CIDR block for each subnet.
private_persistence_subnet_cidr_blocks = {}
# A map of tags to apply to the private-persistence Subnet, on top of the
- # custom_tags. The key is the tag name and the value is the tag value. Note that
- # tags defined here will override tags defined as custom_tags in case of conflict.
+ # custom_tags. The key is the tag name and the value is the tag value. Note
+ # that tags defined here will override tags defined as custom_tags in case of
+ # conflict.
private_persistence_subnet_custom_tags = {}
# A list of Virtual Private Gateways that will propagate routes to private
# subnets. All routes from VPN connections that use Virtual Private Gateways
- # listed here will appear in route tables of private subnets. If left empty, no
- # routes will be propagated.
+ # listed here will appear in route tables of private subnets. If left empty,
+ # no routes will be propagated.
private_propagating_vgws = []
# Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
+ # ranges. MAKE SURE if you change this you also change the CIDR spacing or
+ # you may hit errors. See cidrsubnet interpolation in terraform config for
+ # more information.
private_subnet_bits = 5
- # The amount of spacing between private app subnets. Defaults to subnet_spacing in
- # vpc-app module if not set.
+ # The amount of spacing between private app subnets. Defaults to
+ # subnet_spacing in vpc-app module if not set.
private_subnet_spacing = null
- # A list of Virtual Private Gateways that will propagate routes to public subnets.
- # All routes from VPN connections that use Virtual Private Gateways listed here
- # will appear in route tables of public subnets. If left empty, no routes will be
- # propagated.
+ # A list of Virtual Private Gateways that will propagate routes to public
+ # subnets. All routes from VPN connections that use Virtual Private Gateways
+ # listed here will appear in route tables of public subnets. If left empty, no
+ # routes will be propagated.
public_propagating_vgws = []
- # A map of tags to apply to the public route table(s), on top of the custom_tags.
- # The key is the tag name and the value is the tag value. Note that tags defined
- # here will override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the public route table(s), on top of the
+ # custom_tags. The key is the tag name and the value is the tag value. Note
+ # that tags defined here will override tags defined as custom_tags in case of
+ # conflict.
public_route_table_custom_tags = {}
# Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
+ # ranges. MAKE SURE if you change this you also change the CIDR spacing or
+ # you may hit errors. See cidrsubnet interpolation in terraform config for
+ # more information.
public_subnet_bits = 5
- # A map listing the specific CIDR blocks desired for each public subnet. The key
- # must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of Availability
- # Zones. If left blank, we will compute a reasonable CIDR block for each subnet.
+ # A map listing the specific CIDR blocks desired for each public subnet. The
+ # key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
+ # Availability Zones. If left blank, we will compute a reasonable CIDR block
+ # for each subnet.
public_subnet_cidr_blocks = {}
- # A map of tags to apply to the public Subnet, on top of the custom_tags. The key
- # is the tag name and the value is the tag value. Note that tags defined here will
- # override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the public Subnet, on top of the custom_tags. The
+ # key is the tag name and the value is the tag value. Note that tags defined
+ # here will override tags defined as custom_tags in case of conflict.
public_subnet_custom_tags = {}
- # A map of tags to apply to the default Security Group, on top of the custom_tags.
- # The key is the tag name and the value is the tag value. Note that tags defined
- # here will override tags defined as custom_tags in case of conflict.
+ # A map of tags to apply to the default Security Group, on top of the
+ # custom_tags. The key is the tag name and the value is the tag value. Note
+ # that tags defined here will override tags defined as custom_tags in case of
+ # conflict.
security_group_tags = {}
# The amount of spacing between the different subnet types
@@ -899,20 +916,21 @@ inputs = {
# with certain features, like deploying ALBs.
tag_for_use_with_eks = false
- # The allowed tenancy of instances launched into the selected VPC. Must be one of:
- # default, dedicated, or host.
+ # The allowed tenancy of instances launched into the selected VPC. Must be one
+ # of: default, dedicated, or host.
tenancy = "default"
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# A map of tags to apply just to the VPC itself, but not any of the other
# resources. The key is the tag name and the value is the tag value. Note that
- # tags defined here will override tags defined as custom_tags in case of conflict.
+ # tags defined here will override tags defined as custom_tags in case of
+ # conflict.
vpc_custom_tags = {}
}
@@ -2100,11 +2118,11 @@ Indicates whether or not the VPC has finished creating
diff --git a/docs/reference/services/security/bastion.md b/docs/reference/services/security/bastion.md
index 718d4a0dad..7a075a33cb 100644
--- a/docs/reference/services/security/bastion.md
+++ b/docs/reference/services/security/bastion.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Bastion Host
-View Source
+View Source
Release Notes
@@ -87,7 +87,7 @@ The bastion host AMI is defined using the [Packer](https://www.packer.io/) templ
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -95,7 +95,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog, configure CI / CD for your apps and
@@ -115,26 +115,26 @@ If you want to deploy this repo in production, check out the following resources
module "bastion_host" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/bastion-host?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/bastion-host?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
# A list of IP address ranges in CIDR format from which SSH access will be
- # permitted. Attempts to access the bastion host from all other IP addresses will
- # be blocked. This is only used if var.allow_ssh_from_cidr is true.
+ # permitted. Attempts to access the bastion host from all other IP addresses
+ # will be blocked. This is only used if var.allow_ssh_from_cidr is true.
allow_ssh_from_cidr_list =
# The AMI to run on the bastion host. This should be built from the Packer
- # template under bastion-host.json. One of var.ami or var.ami_filters is required.
- # Set to null if looking up the ami with filters.
+ # template under bastion-host.json. One of var.ami or var.ami_filters is
+ # required. Set to null if looking up the ami with filters.
ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with the
- # Bastion Host. You can build the AMI using the Packer template bastion-host.json.
- # Only used if var.ami is null. One of var.ami or var.ami_filters is required. Set
- # to null if passing the ami ID directly.
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # the Bastion Host. You can build the AMI using the Packer template
+ # bastion-host.json. Only used if var.ami is null. One of var.ami or
+ # var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
# The AMI to run on the bastion host. This should be built from the Packer
- # template under bastion-host.json. One of var.ami or var.ami_filters is required.
- # Set to null if looking up the ami with filters.
+ # template under bastion-host.json. One of var.ami or var.ami_filters is
+ # required. Set to null if looking up the ami with filters.
ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with the
- # Bastion Host. You can build the AMI using the Packer template bastion-host.json.
- # Only used if var.ami is null. One of var.ami or var.ami_filters is required. Set
- # to null if passing the ami ID directly.
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # the Bastion Host. You can build the AMI using the Packer template
+ # bastion-host.json. Only used if var.ami is null. One of var.ami or
+ # var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
diff --git a/docs/reference/services/security/open-vpn.md b/docs/reference/services/security/open-vpn.md
index 3322893b24..cf774bf3c0 100644
--- a/docs/reference/services/security/open-vpn.md
+++ b/docs/reference/services/security/open-vpn.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# OpenVPN Server
-View Source
+View Source
Release Notes
@@ -74,7 +74,7 @@ documentation in the [package-openvpn](https://github.com/gruntwork-io/terraform
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -82,7 +82,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog, configure CI / CD for your apps and
@@ -102,7 +102,7 @@ If you want to deploy this repo in production, check out the following resources
module "openvpn_server" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/openvpn-server?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/openvpn-server?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -118,8 +118,8 @@ module "openvpn_server" {
# required. Set to null if looking up the ami with filters.
ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with the
- # OpenVPN server. You can build the AMI using the Packer template
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # the OpenVPN server. You can build the AMI using the Packer template
# openvpn-server.json. Only used if var.ami is null. One of var.ami or
# var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
# The name of the S3 bucket that will be used to backup PKI secrets. This is a
- # required variable because bucket names must be globally unique across all AWS
- # customers.
+ # required variable because bucket names must be globally unique across all
+ # AWS customers.
backup_bucket_name =
# An object with fields for the country, state, locality, organization,
- # organizational unit, and email address to use with the OpenVPN CA certificate.
+ # organizational unit, and email address to use with the OpenVPN CA
+ # certificate.
ca_cert_fields = :user/). If this list is
- # empty, and var.kms_key_arn is null, the ARN of the current user will be used.
+ # A list of IAM ARNs for users who should be given administrator access to
+ # this CMK (e.g. arn:aws:iam:::user/). If this
+ # list is empty, and var.kms_key_arn is null, the ARN of the current user will
+ # be used.
cmk_administrator_iam_arns = []
# A list of IAM ARNs for users from external AWS accounts who should be given
@@ -216,12 +217,13 @@ module "openvpn_server" {
# Master Key (e.g. arn:aws:iam::1234567890:user/foo).
cmk_user_iam_arns = []
- # Set to true to add var.domain_name as a Route 53 DNS A record for the OpenVPN
- # server
+ # Set to true to add var.domain_name as a Route 53 DNS A record for the
+ # OpenVPN server
create_route53_entry = false
- # The default OS user for the OpenVPN AMI. For AWS Ubuntu AMIs, which is what the
- # Packer template in openvpn-server.json uses, the default OS user is 'ubuntu'.
+ # The default OS user for the OpenVPN AMI. For AWS Ubuntu AMIs, which is what
+ # the Packer template in openvpn-server.json uses, the default OS user is
+ # 'ubuntu'.
default_user = "ubuntu"
# The domain name to use for the OpenVPN server. Only used if
@@ -229,67 +231,69 @@ module "openvpn_server" {
domain_name = null
# If true, the launched EC2 instance will be EBS-optimized. Note that for most
- # instance types, EBS optimization does not incur additional cost, and that many
- # newer EC2 instance types have EBS optimization enabled by default. However, if
- # you are running previous generation instances, there may be an additional cost
- # per hour to run your instances with EBS optimization enabled. Please see:
+ # instance types, EBS optimization does not incur additional cost, and that
+ # many newer EC2 instance types have EBS optimization enabled by default.
+ # However, if you are running previous generation instances, there may be an
+ # additional cost per hour to run your instances with EBS optimization
+ # enabled. Please see:
# https://aws.amazon.com/ec2/pricing/on-demand/#EBS-Optimized_Instances
ebs_optimized = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Set to true to send logs to CloudWatch. This is useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch.
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch.
enable_cloudwatch_log_aggregation = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your
- # OpenVPN server.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your OpenVPN server.
enable_cloudwatch_metrics = true
# Enable fail2ban to block brute force log in attempts. Defaults to true.
enable_fail2ban = true
- # Enable ip-lockdown to block access to the instance metadata. Defaults to true.
+ # Enable ip-lockdown to block access to the instance metadata. Defaults to
+ # true.
enable_ip_lockdown = true
# Set to true to add IAM permissions for ssh-grunt
- # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-
- # runt), which will allow you to manage SSH access via IAM groups.
+ # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-grunt),
+ # which will allow you to manage SSH access via IAM groups.
enable_ssh_grunt = true
- # The ARNs of external AWS accounts where your IAM users are defined. This module
- # will create IAM roles that users in those accounts will be able to assume to get
- # access to the request/revocation SQS queues.
+ # The ARNs of external AWS accounts where your IAM users are defined. This
+ # module will create IAM roles that users in those accounts will be able to
+ # assume to get access to the request/revocation SQS queues.
external_account_arns = []
- # Since our IAM users are defined in a separate AWS account, this variable is used
- # to specify the ARN of an IAM role that allows ssh-grunt to retrieve IAM group
- # and public SSH key info from that account.
+ # Since our IAM users are defined in a separate AWS account, this variable is
+ # used to specify the ARN of an IAM role that allows ssh-grunt to retrieve IAM
+ # group and public SSH key info from that account.
external_account_ssh_grunt_role_arn = ""
- # When a terraform destroy is run, should the backup s3 bucket be destroyed even
- # if it contains files. Should only be set to true for testing/development
+ # When a terraform destroy is run, should the backup s3 bucket be destroyed
+ # even if it contains files. Should only be set to true for
+ # testing/development.
force_destroy = false
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_asg_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_asg_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -300,41 +304,41 @@ module "openvpn_server" {
# percentage above this threshold.
high_asg_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_asg_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_asg_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_memory_utilization_treat_missing_data = "missing"
- # The ID of the Route 53 Hosted Zone in which the domain should be created. Only
- # used if var.create_route53_entry is true. If null, lookup the hosted zone ID
- # using the var.base_domain_name.
+ # The ID of the Route 53 Hosted Zone in which the domain should be created.
+ # Only used if var.create_route53_entry is true. If null, lookup the hosted
+ # zone ID using the var.base_domain_name.
hosted_zone_id = null
# The type of instance to run for the OpenVPN Server
instance_type = "t3.micro"
- # The name of a Key Pair that can be used to SSH to this instance. Leave blank if
- # you don't want to enable Key Pair auth.
+ # The name of a Key Pair that can be used to SSH to this instance. Leave blank
+ # if you don't want to enable Key Pair auth.
keypair_name = null
- # The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK) that
- # will be used to encrypt/decrypt backup files. If null, a key will be created
- # with permissions assigned by the following variables:
+ # The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK)
+ # that will be used to encrypt/decrypt backup files. If null, a key will be
+ # created with permissions assigned by the following variables:
# cmk_administrator_iam_arns, cmk_user_iam_arns, cmk_external_user_iam_arns,
# allow_manage_key_permissions.
kms_key_arn = null
@@ -346,52 +350,55 @@ module "openvpn_server" {
# Tags to apply to every resource created by the openvpn-server module.
openvpn_server_tags = {}
- # The name of the sqs queue that will be used to receive new certificate requests.
+ # The name of the sqs queue that will be used to receive new certificate
+ # requests.
request_queue_name = "queue"
- # The name of the sqs queue that will be used to receive certification revocation
- # requests. Note that the queue name will be automatically prefixed with
- # 'openvpn-requests-'.
+ # The name of the sqs queue that will be used to receive certification
+ # revocation requests. Note that the queue name will be automatically prefixed
+ # with 'openvpn-requests-'.
revocation_queue_name = "queue"
# The size of the OpenVPN EC2 instance root volume, in GB.
root_volume_size = 8
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the EC2 instances. This is useful if you wish to customize the CloudWatch Log
- # Group with various settings such as retention periods and KMS encryption. When
- # false, the CloudWatch agent will automatically create a basic log group to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the EC2 instances. This is useful if you wish to customize the
+ # CloudWatch Log Group with various settings such as retention periods and KMS
+ # encryption. When false, the CloudWatch agent will automatically create a
+ # basic log group to use.
should_create_cloudwatch_log_group = true
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this OpenVPN server. This value is only used if
- # enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this OpenVPN server. This value is only used
+ # if enable_ssh_grunt=true.
ssh_grunt_iam_group = "ssh-grunt-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this OpenVPN server with sudo permissions. This value
- # is only used if enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this OpenVPN server with sudo permissions.
+ # This value is only used if enable_ssh_grunt=true.
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
# The tenancy of this server. Must be one of: default, dedicated, or host.
tenancy = "default"
- # Set this variable to true to enable the use of Instance Metadata Service Version
- # 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due
- # to its special security hardening, we allow this in order to support the use
- # case of AMIs built outside of these modules that depend on IMDSv1.
+ # Set this variable to true to enable the use of Instance Metadata Service
+ # Version 1 in this module's aws_launch_template. Note that while IMDSv2 is
+ # preferred due to its special security hardening, we allow this in order to
+ # support the use case of AMIs built outside of these modules that depend on
+ # IMDSv1.
use_imdsv1 = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# When true, generate Diffie-Hellman parameters using strong primes. Note that
- # while stronger primes make the keys more cryptographically secure, the effective
- # security gains are known to be insignificant in practice.
+ # while stronger primes make the keys more cryptographically secure, the
+ # effective security gains are known to be insignificant in practice.
use_strong_prime = false
# A list of CIDR ranges to be routed over the VPN.
@@ -399,19 +406,19 @@ module "openvpn_server" {
# A list of domains to push down to the client to resolve over VPN. This will
# configure the OpenVPN server to pass through domains that should be resolved
- # over the VPN connection (as opposed to the locally configured resolver) to the
- # client. Note that for each domain, all subdomains will be resolved as well.
- # E.g., if you pass in 'mydomain.local', subdomains such as
- # 'hello.world.mydomain.local' and 'example.mydomain.local' will also be forwarded
- # to through the VPN server.
+ # over the VPN connection (as opposed to the locally configured resolver) to
+ # the client. Note that for each domain, all subdomains will be resolved as
+ # well. E.g., if you pass in 'mydomain.local', subdomains such as
+ # 'hello.world.mydomain.local' and 'example.mydomain.local' will also be
+ # forwarded through the VPN server.
vpn_search_domains = []
- # The subnet IP and mask vpn clients will be assigned addresses from. For example,
- # 172.16.1.0 255.255.255.0. This is a non-routed network that only exists between
- # the VPN server and the client. Therefore, it should NOT overlap with VPC
- # addressing, or the client won't be able to access any of the VPC IPs. In
- # general, we recommend using internal, non-RFC 1918 IP addresses, such as
- # 172.16.xx.yy.
+ # The subnet IP and mask vpn clients will be assigned addresses from. For
+ # example, 172.16.1.0 255.255.255.0. This is a non-routed network that only
+ # exists between the VPN server and the client. Therefore, it should NOT
+ # overlap with VPC addressing, or the client won't be able to access any of
+ # the VPC IPs. In general, we recommend using internal, non-RFC 1918 IP
+ # addresses, such as 172.16.xx.yy.
vpn_subnet = "172.16.1.0 255.255.255.0"
}
@@ -429,7 +436,7 @@ module "openvpn_server" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/openvpn-server?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/openvpn-server?ref=v0.104.12"
}
inputs = {
@@ -448,8 +455,8 @@ inputs = {
# required. Set to null if looking up the ami with filters.
ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with the
- # OpenVPN server. You can build the AMI using the Packer template
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # the OpenVPN server. You can build the AMI using the Packer template
# openvpn-server.json. Only used if var.ami is null. One of var.ami or
# var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
# The name of the S3 bucket that will be used to backup PKI secrets. This is a
- # required variable because bucket names must be globally unique across all AWS
- # customers.
+ # required variable because bucket names must be globally unique across all
+ # AWS customers.
backup_bucket_name =
# An object with fields for the country, state, locality, organization,
- # organizational unit, and email address to use with the OpenVPN CA certificate.
+ # organizational unit, and email address to use with the OpenVPN CA
+ # certificate.
ca_cert_fields = :user/). If this list is
- # empty, and var.kms_key_arn is null, the ARN of the current user will be used.
+ # A list of IAM ARNs for users who should be given administrator access to
+ # this CMK (e.g. arn:aws:iam:::user/). If this
+ # list is empty, and var.kms_key_arn is null, the ARN of the current user will
+ # be used.
cmk_administrator_iam_arns = []
# A list of IAM ARNs for users from external AWS accounts who should be given
@@ -546,12 +554,13 @@ inputs = {
# Master Key (e.g. arn:aws:iam::1234567890:user/foo).
cmk_user_iam_arns = []
- # Set to true to add var.domain_name as a Route 53 DNS A record for the OpenVPN
- # server
+ # Set to true to add var.domain_name as a Route 53 DNS A record for the
+ # OpenVPN server
create_route53_entry = false
- # The default OS user for the OpenVPN AMI. For AWS Ubuntu AMIs, which is what the
- # Packer template in openvpn-server.json uses, the default OS user is 'ubuntu'.
+ # The default OS user for the OpenVPN AMI. For AWS Ubuntu AMIs, which is what
+ # the Packer template in openvpn-server.json uses, the default OS user is
+ # 'ubuntu'.
default_user = "ubuntu"
# The domain name to use for the OpenVPN server. Only used if
@@ -559,67 +568,69 @@ inputs = {
domain_name = null
# If true, the launched EC2 instance will be EBS-optimized. Note that for most
- # instance types, EBS optimization does not incur additional cost, and that many
- # newer EC2 instance types have EBS optimization enabled by default. However, if
- # you are running previous generation instances, there may be an additional cost
- # per hour to run your instances with EBS optimization enabled. Please see:
+ # instance types, EBS optimization does not incur additional cost, and that
+ # many newer EC2 instance types have EBS optimization enabled by default.
+ # However, if you are running previous generation instances, there may be an
+ # additional cost per hour to run your instances with EBS optimization
+ # enabled. Please see:
# https://aws.amazon.com/ec2/pricing/on-demand/#EBS-Optimized_Instances
ebs_optimized = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Set to true to send logs to CloudWatch. This is useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch.
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch.
enable_cloudwatch_log_aggregation = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your
- # OpenVPN server.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your OpenVPN server.
enable_cloudwatch_metrics = true
# Enable fail2ban to block brute force log in attempts. Defaults to true.
enable_fail2ban = true
- # Enable ip-lockdown to block access to the instance metadata. Defaults to true.
+ # Enable ip-lockdown to block access to the instance metadata. Defaults to
+ # true.
enable_ip_lockdown = true
# Set to true to add IAM permissions for ssh-grunt
- # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-
- # runt), which will allow you to manage SSH access via IAM groups.
+ # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-grunt),
+ # which will allow you to manage SSH access via IAM groups.
enable_ssh_grunt = true
- # The ARNs of external AWS accounts where your IAM users are defined. This module
- # will create IAM roles that users in those accounts will be able to assume to get
- # access to the request/revocation SQS queues.
+ # The ARNs of external AWS accounts where your IAM users are defined. This
+ # module will create IAM roles that users in those accounts will be able to
+ # assume to get access to the request/revocation SQS queues.
external_account_arns = []
- # Since our IAM users are defined in a separate AWS account, this variable is used
- # to specify the ARN of an IAM role that allows ssh-grunt to retrieve IAM group
- # and public SSH key info from that account.
+ # Since our IAM users are defined in a separate AWS account, this variable is
+ # used to specify the ARN of an IAM role that allows ssh-grunt to retrieve IAM
+ # group and public SSH key info from that account.
external_account_ssh_grunt_role_arn = ""
- # When a terraform destroy is run, should the backup s3 bucket be destroyed even
- # if it contains files. Should only be set to true for testing/development
+ # When a terraform destroy is run, should the backup s3 bucket be destroyed
+ # even if it contains files. Should only be set to true for
+ # testing/development.
force_destroy = false
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_asg_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_asg_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -630,41 +641,41 @@ inputs = {
# percentage above this threshold.
high_asg_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_asg_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_asg_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_memory_utilization_treat_missing_data = "missing"
- # The ID of the Route 53 Hosted Zone in which the domain should be created. Only
- # used if var.create_route53_entry is true. If null, lookup the hosted zone ID
- # using the var.base_domain_name.
+ # The ID of the Route 53 Hosted Zone in which the domain should be created.
+ # Only used if var.create_route53_entry is true. If null, lookup the hosted
+ # zone ID using the var.base_domain_name.
hosted_zone_id = null
# The type of instance to run for the OpenVPN Server
instance_type = "t3.micro"
- # The name of a Key Pair that can be used to SSH to this instance. Leave blank if
- # you don't want to enable Key Pair auth.
+ # The name of a Key Pair that can be used to SSH to this instance. Leave blank
+ # if you don't want to enable Key Pair auth.
keypair_name = null
- # The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK) that
- # will be used to encrypt/decrypt backup files. If null, a key will be created
- # with permissions assigned by the following variables:
+ # The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK)
+ # that will be used to encrypt/decrypt backup files. If null, a key will be
+ # created with permissions assigned by the following variables:
# cmk_administrator_iam_arns, cmk_user_iam_arns, cmk_external_user_iam_arns,
# allow_manage_key_permissions.
kms_key_arn = null
@@ -676,52 +687,55 @@ inputs = {
# Tags to apply to every resource created by the openvpn-server module.
openvpn_server_tags = {}
- # The name of the sqs queue that will be used to receive new certificate requests.
+ # The name of the sqs queue that will be used to receive new certificate
+ # requests.
request_queue_name = "queue"
- # The name of the sqs queue that will be used to receive certification revocation
- # requests. Note that the queue name will be automatically prefixed with
- # 'openvpn-requests-'.
+ # The name of the sqs queue that will be used to receive certification
+ # revocation requests. Note that the queue name will be automatically prefixed
+ # with 'openvpn-requests-'.
revocation_queue_name = "queue"
# The size of the OpenVPN EC2 instance root volume, in GB.
root_volume_size = 8
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the EC2 instances. This is useful if you wish to customize the CloudWatch Log
- # Group with various settings such as retention periods and KMS encryption. When
- # false, the CloudWatch agent will automatically create a basic log group to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the EC2 instances. This is useful if you wish to customize the
+ # CloudWatch Log Group with various settings such as retention periods and KMS
+ # encryption. When false, the CloudWatch agent will automatically create a
+ # basic log group to use.
should_create_cloudwatch_log_group = true
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this OpenVPN server. This value is only used if
- # enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this OpenVPN server. This value is only used
+ # if enable_ssh_grunt=true.
ssh_grunt_iam_group = "ssh-grunt-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this OpenVPN server with sudo permissions. This value
- # is only used if enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this OpenVPN server with sudo permissions.
+ # This value is only used if enable_ssh_grunt=true.
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
# The tenancy of this server. Must be one of: default, dedicated, or host.
tenancy = "default"
- # Set this variable to true to enable the use of Instance Metadata Service Version
- # 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due
- # to its special security hardening, we allow this in order to support the use
- # case of AMIs built outside of these modules that depend on IMDSv1.
+ # Set this variable to true to enable the use of Instance Metadata Service
+ # Version 1 in this module's aws_launch_template. Note that while IMDSv2 is
+ # preferred due to its special security hardening, we allow this in order to
+ # support the use case of AMIs built outside of these modules that depend on
+ # IMDSv1.
use_imdsv1 = true
- # When true, all IAM policies will be managed as dedicated policies rather than
- # inline policies attached to the IAM roles. Dedicated managed policies are
- # friendlier to automated policy checkers, which may scan a single resource for
- # findings. As such, it is important to avoid inline policies when targeting
- # compliance with various security standards.
+ # When true, all IAM policies will be managed as dedicated policies rather
+ # than inline policies attached to the IAM roles. Dedicated managed policies
+ # are friendlier to automated policy checkers, which may scan a single
+ # resource for findings. As such, it is important to avoid inline policies
+ # when targeting compliance with various security standards.
use_managed_iam_policies = true
# When true, generate Diffie-Hellman parameters using strong primes. Note that
- # while stronger primes make the keys more cryptographically secure, the effective
- # security gains are known to be insignificant in practice.
+ # while stronger primes make the keys more cryptographically secure, the
+ # effective security gains are known to be insignificant in practice.
use_strong_prime = false
# A list of CIDR ranges to be routed over the VPN.
@@ -729,19 +743,19 @@ inputs = {
# A list of domains to push down to the client to resolve over VPN. This will
# configure the OpenVPN server to pass through domains that should be resolved
- # over the VPN connection (as opposed to the locally configured resolver) to the
- # client. Note that for each domain, all subdomains will be resolved as well.
- # E.g., if you pass in 'mydomain.local', subdomains such as
- # 'hello.world.mydomain.local' and 'example.mydomain.local' will also be forwarded
- # to through the VPN server.
+ # over the VPN connection (as opposed to the locally configured resolver) to
+ # the client. Note that for each domain, all subdomains will be resolved as
+ # well. E.g., if you pass in 'mydomain.local', subdomains such as
+ # 'hello.world.mydomain.local' and 'example.mydomain.local' will also be
+ # forwarded through the VPN server.
vpn_search_domains = []
- # The subnet IP and mask vpn clients will be assigned addresses from. For example,
- # 172.16.1.0 255.255.255.0. This is a non-routed network that only exists between
- # the VPN server and the client. Therefore, it should NOT overlap with VPC
- # addressing, or the client won't be able to access any of the VPC IPs. In
- # general, we recommend using internal, non-RFC 1918 IP addresses, such as
- # 172.16.xx.yy.
+ # The subnet IP and mask vpn clients will be assigned addresses from. For
+ # example, 172.16.1.0 255.255.255.0. This is a non-routed network that only
+ # exists between the VPN server and the client. Therefore, it should NOT
+ # overlap with VPC addressing, or the client won't be able to access any of
+ # the VPC IPs. In general, we recommend using internal, non-RFC 1918 IP
+ # addresses, such as 172.16.xx.yy.
vpn_subnet = "172.16.1.0 255.255.255.0"
}
@@ -1507,11 +1521,11 @@ The security group ID of the OpenVPN server.
diff --git a/docs/reference/services/security/tailscale-subnet-router.md b/docs/reference/services/security/tailscale-subnet-router.md
index d977c200c2..cddf181974 100644
--- a/docs/reference/services/security/tailscale-subnet-router.md
+++ b/docs/reference/services/security/tailscale-subnet-router.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Tailscale Subnet Router
-View Source
+View Source
Release Notes
@@ -77,7 +77,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -94,7 +94,7 @@ access services within your VPC through the tailnet.
### What AMI should I use?
-Any AMI can be used with this module, provided that the [install-tailscale](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/mgmt/tailscale-subnet-router/scripts/install-tailscale.sh) script is installed
+Any AMI can be used with this module, provided that the [install-tailscale](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/mgmt/tailscale-subnet-router/scripts/install-tailscale.sh) script is installed
into the AMI. The `install-tailscale` script ensures that Tailscale is installed with the `init-tailscale-subnet-router` boot
script, which can be used to load the auth key from AWS Secrets Manager to authenticate to Tailscale at boot time.
@@ -150,7 +150,7 @@ resource "aws_iam_role_policy_attachment" "attachment" {
module "tailscale_subnet_router" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/tailscale-subnet-router?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/tailscale-subnet-router?ref=v0.104.12"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -161,8 +161,8 @@ module "tailscale_subnet_router" {
# var.ami_filters is required. Set to null if looking up the ami with filters.
ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with the
- # Tailscale subnet router. You can build the AMI using the Packer template
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # the Tailscale subnet router. You can build the AMI using the Packer template
# tailscale-subnet-router-ubuntu.json. Only used if var.ami is null. One of
# var.ami or var.ami_filters is required. Set to null if passing the ami ID
# directly.
@@ -174,12 +174,12 @@ module "tailscale_subnet_router" {
))
)>
- # The ARN of a Secrets Manager entry containing the Tailscale auth key to use for
- # authenticating the server.
+ # The ARN of a Secrets Manager entry containing the Tailscale auth key to use
+ # for authenticating the server.
auth_key_secrets_manager_arn =
- # The name of the server. This will be used to namespace all resources created by
- # this module.
+ # The name of the server. This will be used to namespace all resources created
+ # by this module.
name =
# The ids of the subnets where this server should be deployed.
@@ -196,12 +196,12 @@ module "tailscale_subnet_router" {
# relay server.
additional_security_groups = []
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arn = []
- # Cloud init scripts to run on the Tailscale subnet router while it boots. See the
- # part blocks in
+ # Cloud init scripts to run on the Tailscale subnet router while it boots. See
+ # the part blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
# syntax.
cloud_init_parts = {}
@@ -211,87 +211,89 @@ module "tailscale_subnet_router" {
cloudwatch_log_group_kms_key_id = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values.
cloudwatch_log_group_tags = null
- # The default OS user for the Tailscale subnet router AMI. For AWS Ubuntu AMIs,
- # which is what the Packer template in tailscale-subnet-router-ubuntu.json uses,
- # the default OS user is 'ubuntu'.
+ # The default OS user for the Tailscale subnet router AMI. For AWS Ubuntu
+ # AMIs, which is what the Packer template in
+ # tailscale-subnet-router-ubuntu.json uses, the default OS user is 'ubuntu'.
default_user = "ubuntu"
# If true, the launched EC2 instance will be EBS-optimized. Note that for most
- # instance types, EBS optimization does not incur additional cost, and that many
- # newer EC2 instance types have EBS optimization enabled by default. However, if
- # you are running previous generation instances, there may be an additional cost
- # per hour to run your instances with EBS optimization enabled. Please see:
+ # instance types, EBS optimization does not incur additional cost, and that
+ # many newer EC2 instance types have EBS optimization enabled by default.
+ # However, if you are running previous generation instances, there may be an
+ # additional cost per hour to run your instances with EBS optimization
+ # enabled. Please see:
# https://aws.amazon.com/ec2/pricing/on-demand/#EBS-Optimized_Instances
ebs_optimized = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Set to true to send logs to CloudWatch. This is useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch.
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch.
enable_cloudwatch_log_aggregation = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your
- # Tailscale subnet router.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your Tailscale subnet
+ # router.
enable_cloudwatch_metrics = true
# Enable fail2ban to block brute force log in attempts. Defaults to true.
enable_fail2ban = true
# Set this variable to true to enable the Instance Metadata Service (IMDS)
- # endpoint, which is used to fetch information such as user-data scripts, instance
- # IP address and region, etc. Set this variable to false if you do not want the
- # IMDS endpoint enabled for instances launched into the Auto Scaling Group.
+ # endpoint, which is used to fetch information such as user-data scripts,
+ # instance IP address and region, etc. Set this variable to false if you do
+ # not want the IMDS endpoint enabled for instances launched into the Auto
+ # Scaling Group.
enable_imds = true
- # Enable ip-lockdown to block access to the instance metadata. Defaults to true.
+ # Enable ip-lockdown to block access to the instance metadata. Defaults to
+ # true.
enable_ip_lockdown = true
# Set to true to add IAM permissions for ssh-grunt
- # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-
- # runt), which will allow you to manage SSH access via IAM groups.
+ # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-grunt),
+ # which will allow you to manage SSH access via IAM groups.
enable_ssh_grunt = true
- # Whether to configure DNS to Tailscale on the EC2 instance. By default we disable
- # the tailnet DNS as it is generally best to let Amazon handle the DNS
- # configuration on EC2 instances. This is most useful when the subnet router needs
- # to communicate with other services on your tailnet.
+ # Whether to configure DNS to Tailscale on the EC2 instance. By default we
+ # disable the tailnet DNS as it is generally best to let Amazon handle the DNS
+ # configuration on EC2 instances. This is most useful when the subnet router
+ # needs to communicate with other services on your tailnet.
enable_tailscale_dns = false
- # If you are using ssh-grunt and your IAM users / groups are defined in a separate
- # AWS account, you can use this variable to specify the ARN of an IAM role that
- # ssh-grunt can assume to retrieve IAM group and public SSH key info from that
- # account. To omit this variable, set it to an empty string (do NOT use null, or
- # Terraform will complain).
+ # If you are using ssh-grunt and your IAM users / groups are defined in a
+ # separate AWS account, you can use this variable to specify the ARN of an IAM
+ # role that ssh-grunt can assume to retrieve IAM group and public SSH key info
+ # from that account. To omit this variable, set it to an empty string (do NOT
+ # use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_asg_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_asg_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -302,69 +304,71 @@ module "tailscale_subnet_router" {
# percentage above this threshold.
high_asg_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_asg_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_asg_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_memory_utilization_treat_missing_data = "missing"
# The type of EC2 instance to run (e.g. t2.micro)
instance_type = "t3.nano"
- # The number of seconds until a newly launched instance is configured and ready to
- # use.
+ # The number of seconds until a newly launched instance is configured and
+ # ready to use.
instance_warmup = null
- # The name of a Key Pair that can be used to SSH to this instance. Leave blank if
- # you don't want to enable Key Pair auth.
+ # The name of a Key Pair that can be used to SSH to this instance. Leave blank
+ # if you don't want to enable Key Pair auth.
keypair_name = null
- # List of CIDR blocks to expose as routes on the tailnet through this server. If
- # null, defaults to the entire VPC CIDR block.
+ # List of CIDR blocks to expose as routes on the tailnet through this server.
+ # If null, defaults to the entire VPC CIDR block.
routes = null
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the EC2 instances. This is useful if you wish to customize the CloudWatch Log
- # Group with various settings such as retention periods and KMS encryption. When
- # false, the CloudWatch agent will automatically create a basic log group to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the EC2 instances. This is useful if you wish to customize the
+ # CloudWatch Log Group with various settings such as retention periods and KMS
+ # encryption. When false, the CloudWatch agent will automatically create a
+ # basic log group to use.
should_create_cloudwatch_log_group = true
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this Tailscale subnet router. This value is only used
- # if enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this Tailscale subnet router. This value is
+ # only used if enable_ssh_grunt=true.
ssh_grunt_iam_group = "ssh-grunt-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this Tailscale subnet router with sudo permissions.
- # This value is only used if enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this Tailscale subnet router with sudo
+ # permissions. This value is only used if enable_ssh_grunt=true.
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
# Advertised hostname of the server on the tailnet. If null, defaults to the
# var.name input value.
tailnet_hostname = null
- # Advertise tags for Tailscale subnet router. These are used on the 'up' command
- # to control ACLs in Tailscale.
+ # Advertise tags for Tailscale subnet router. These are used on the 'up'
+ # command to control ACLs in Tailscale.
tailscale_advertise_tags = []
- # Set this variable to true to enable the use of Instance Metadata Service Version
- # 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due
- # to its special security hardening, we allow this in order to support the use
- # case of AMIs built outside of these modules that depend on IMDSv1.
+ # Set this variable to true to enable the use of Instance Metadata Service
+ # Version 1 in this module's aws_launch_template. Note that while IMDSv2 is
+ # preferred due to its special security hardening, we allow this in order to
+ # support the use case of AMIs built outside of these modules that depend on
+ # IMDSv1.
use_imdsv1 = false
}
@@ -382,7 +386,7 @@ module "tailscale_subnet_router" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/tailscale-subnet-router?ref=v0.104.10"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/tailscale-subnet-router?ref=v0.104.12"
}
inputs = {
@@ -396,8 +400,8 @@ inputs = {
# var.ami_filters is required. Set to null if looking up the ami with filters.
ami =
- # Properties on the AMI that can be used to lookup a prebuilt AMI for use with the
- # Tailscale subnet router. You can build the AMI using the Packer template
+ # Properties on the AMI that can be used to lookup a prebuilt AMI for use with
+ # the Tailscale subnet router. You can build the AMI using the Packer template
# tailscale-subnet-router-ubuntu.json. Only used if var.ami is null. One of
# var.ami or var.ami_filters is required. Set to null if passing the ami ID
# directly.
@@ -409,12 +413,12 @@ inputs = {
))
)>
- # The ARN of a Secrets Manager entry containing the Tailscale auth key to use for
- # authenticating the server.
+ # The ARN of a Secrets Manager entry containing the Tailscale auth key to use
+ # for authenticating the server.
auth_key_secrets_manager_arn =
- # The name of the server. This will be used to namespace all resources created by
- # this module.
+ # The name of the server. This will be used to namespace all resources created
+ # by this module.
name =
# The ids of the subnets where this server should be deployed.
@@ -431,12 +435,12 @@ inputs = {
# relay server.
additional_security_groups = []
- # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
- # space usage) should send notifications.
+ # The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
+ # disk space usage) should send notifications.
alarms_sns_topic_arn = []
- # Cloud init scripts to run on the Tailscale subnet router while it boots. See the
- # part blocks in
+ # Cloud init scripts to run on the Tailscale subnet router while it boots. See
+ # the part blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
# syntax.
cloud_init_parts = {}
@@ -446,87 +450,89 @@ inputs = {
cloudwatch_log_group_kms_key_id = null
# The number of days to retain log events in the log group. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/clou
- # watch_log_group#retention_in_days for all the valid values. When null, the log
- # events are retained forever.
+ # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#retention_in_days
+ # for all the valid values. When null, the log events are retained forever.
cloudwatch_log_group_retention_in_days = null
- # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys are
- # tag keys and values are tag values.
+ # Tags to apply on the CloudWatch Log Group, encoded as a map where the keys
+ # are tag keys and values are tag values.
cloudwatch_log_group_tags = null
- # The default OS user for the Tailscale subnet router AMI. For AWS Ubuntu AMIs,
- # which is what the Packer template in tailscale-subnet-router-ubuntu.json uses,
- # the default OS user is 'ubuntu'.
+ # The default OS user for the Tailscale subnet router AMI. For AWS Ubuntu
+ # AMIs, which is what the Packer template in
+ # tailscale-subnet-router-ubuntu.json uses, the default OS user is 'ubuntu'.
default_user = "ubuntu"
# If true, the launched EC2 instance will be EBS-optimized. Note that for most
- # instance types, EBS optimization does not incur additional cost, and that many
- # newer EC2 instance types have EBS optimization enabled by default. However, if
- # you are running previous generation instances, there may be an additional cost
- # per hour to run your instances with EBS optimization enabled. Please see:
+ # instance types, EBS optimization does not incur additional cost, and that
+ # many newer EC2 instance types have EBS optimization enabled by default.
+ # However, if you are running previous generation instances, there may be an
+ # additional cost per hour to run your instances with EBS optimization
+ # enabled. Please see:
# https://aws.amazon.com/ec2/pricing/on-demand/#EBS-Optimized_Instances
ebs_optimized = true
- # Set to true to enable several basic CloudWatch alarms around CPU usage, memory
- # usage, and disk space usage. If set to true, make sure to specify SNS topics to
- # send notifications to using var.alarms_sns_topic_arn.
+ # Set to true to enable several basic CloudWatch alarms around CPU usage,
+ # memory usage, and disk space usage. If set to true, make sure to specify SNS
+ # topics to send notifications to using var.alarms_sns_topic_arn.
enable_cloudwatch_alarms = true
# Set to true to send logs to CloudWatch. This is useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/log
- # /cloudwatch-log-aggregation-scripts to do log aggregation in CloudWatch.
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/logs/cloudwatch-log-aggregation-scripts
+ # to do log aggregation in CloudWatch.
enable_cloudwatch_log_aggregation = true
- # Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
- # useful in combination with
- # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/age
- # ts/cloudwatch-agent to get memory and disk metrics in CloudWatch for your
- # Tailscale subnet router.
+ # Set to true to add IAM permissions to send custom metrics to CloudWatch.
+ # This is useful in combination with
+ # https://github.com/gruntwork-io/terraform-aws-monitoring/tree/master/modules/agents/cloudwatch-agent
+ # to get memory and disk metrics in CloudWatch for your Tailscale subnet
+ # router.
enable_cloudwatch_metrics = true
# Enable fail2ban to block brute force log in attempts. Defaults to true.
enable_fail2ban = true
# Set this variable to true to enable the Instance Metadata Service (IMDS)
- # endpoint, which is used to fetch information such as user-data scripts, instance
- # IP address and region, etc. Set this variable to false if you do not want the
- # IMDS endpoint enabled for instances launched into the Auto Scaling Group.
+ # endpoint, which is used to fetch information such as user-data scripts,
+ # instance IP address and region, etc. Set this variable to false if you do
+ # not want the IMDS endpoint enabled for instances launched into the Auto
+ # Scaling Group.
enable_imds = true
- # Enable ip-lockdown to block access to the instance metadata. Defaults to true.
+ # Enable ip-lockdown to block access to the instance metadata. Defaults to
+ # true.
enable_ip_lockdown = true
# Set to true to add IAM permissions for ssh-grunt
- # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-
- # runt), which will allow you to manage SSH access via IAM groups.
+ # (https://github.com/gruntwork-io/terraform-aws-security/tree/master/modules/ssh-grunt),
+ # which will allow you to manage SSH access via IAM groups.
enable_ssh_grunt = true
- # Whether to configure DNS to Tailscale on the EC2 instance. By default we disable
- # the tailnet DNS as it is generally best to let Amazon handle the DNS
- # configuration on EC2 instances. This is most useful when the subnet router needs
- # to communicate with other services on your tailnet.
+ # Whether to configure DNS to Tailscale on the EC2 instance. By default we
+ # disable the tailnet DNS as it is generally best to let Amazon handle the DNS
+ # configuration on EC2 instances. This is most useful when the subnet router
+ # needs to communicate with other services on your tailnet.
enable_tailscale_dns = false
- # If you are using ssh-grunt and your IAM users / groups are defined in a separate
- # AWS account, you can use this variable to specify the ARN of an IAM role that
- # ssh-grunt can assume to retrieve IAM group and public SSH key info from that
- # account. To omit this variable, set it to an empty string (do NOT use null, or
- # Terraform will complain).
+ # If you are using ssh-grunt and your IAM users / groups are defined in a
+ # separate AWS account, you can use this variable to specify the ARN of an IAM
+ # role that ssh-grunt can assume to retrieve IAM group and public SSH key info
+ # from that account. To omit this variable, set it to an empty string (do NOT
+ # use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage for
- # the ASG.
+ # The period, in seconds, over which to measure the CPU utilization percentage
+ # for the ASG.
high_asg_cpu_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster CPU utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster CPU utilization
+ # percentage above this threshold.
high_asg_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_cpu_utilization_treat_missing_data = "missing"
# The period, in seconds, over which to measure the root disk utilization
@@ -537,69 +543,71 @@ inputs = {
# percentage above this threshold.
high_asg_disk_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_disk_utilization_treat_missing_data = "missing"
- # The period, in seconds, over which to measure the Memory utilization percentage
- # for the ASG.
+ # The period, in seconds, over which to measure the Memory utilization
+ # percentage for the ASG.
high_asg_memory_utilization_period = 60
- # Trigger an alarm if the ASG has an average cluster Memory utilization percentage
- # above this threshold.
+ # Trigger an alarm if the ASG has an average cluster Memory utilization
+ # percentage above this threshold.
high_asg_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEma
- # l.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching'
- # or 'notBreaching'.
+ # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
+ # Based on
+ # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
+ # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
high_asg_memory_utilization_treat_missing_data = "missing"
# The type of EC2 instance to run (e.g. t2.micro)
instance_type = "t3.nano"
- # The number of seconds until a newly launched instance is configured and ready to
- # use.
+ # The number of seconds until a newly launched instance is configured and
+ # ready to use.
instance_warmup = null
- # The name of a Key Pair that can be used to SSH to this instance. Leave blank if
- # you don't want to enable Key Pair auth.
+ # The name of a Key Pair that can be used to SSH to this instance. Leave blank
+ # if you don't want to enable Key Pair auth.
keypair_name = null
- # List of CIDR blocks to expose as routes on the tailnet through this server. If
- # null, defaults to the entire VPC CIDR block.
+ # List of CIDR blocks to expose as routes on the tailnet through this server.
+ # If null, defaults to the entire VPC CIDR block.
routes = null
- # When true, precreate the CloudWatch Log Group to use for log aggregation from
- # the EC2 instances. This is useful if you wish to customize the CloudWatch Log
- # Group with various settings such as retention periods and KMS encryption. When
- # false, the CloudWatch agent will automatically create a basic log group to use.
+ # When true, precreate the CloudWatch Log Group to use for log aggregation
+ # from the EC2 instances. This is useful if you wish to customize the
+ # CloudWatch Log Group with various settings such as retention periods and KMS
+ # encryption. When false, the CloudWatch agent will automatically create a
+ # basic log group to use.
should_create_cloudwatch_log_group = true
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this Tailscale subnet router. This value is only used
- # if enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this Tailscale subnet router. This value is
+ # only used if enable_ssh_grunt=true.
ssh_grunt_iam_group = "ssh-grunt-users"
- # If you are using ssh-grunt, this is the name of the IAM group from which users
- # will be allowed to SSH to this Tailscale subnet router with sudo permissions.
- # This value is only used if enable_ssh_grunt=true.
+ # If you are using ssh-grunt, this is the name of the IAM group from which
+ # users will be allowed to SSH to this Tailscale subnet router with sudo
+ # permissions. This value is only used if enable_ssh_grunt=true.
ssh_grunt_iam_group_sudo = "ssh-grunt-sudo-users"
# Advertised hostname of the server on the tailnet. If null, defaults to the
# var.name input value.
tailnet_hostname = null
- # Advertise tags for Tailscale subnet router. These are used on the 'up' command
- # to control ACLs in Tailscale.
+ # Advertise tags for Tailscale subnet router. These are used on the 'up'
+ # command to control ACLs in Tailscale.
tailscale_advertise_tags = []
- # Set this variable to true to enable the use of Instance Metadata Service Version
- # 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due
- # to its special security hardening, we allow this in order to support the use
- # case of AMIs built outside of these modules that depend on IMDSv1.
+ # Set this variable to true to enable the use of Instance Metadata Service
+ # Version 1 in this module's aws_launch_template. Note that while IMDSv2 is
+ # preferred due to its special security hardening, we allow this in order to
+ # support the use case of AMIs built outside of these modules that depend on
+ # IMDSv1.
use_imdsv1 = false
}
@@ -1087,11 +1095,11 @@ ID of the primary security group attached to the Tailscale relay server.
diff --git a/docs/reference/services/security/tls-scripts.md b/docs/reference/services/security/tls-scripts.md
index eb1bee7c47..3cb98fa465 100644
--- a/docs/reference/services/security/tls-scripts.md
+++ b/docs/reference/services/security/tls-scripts.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# TLS Scripts
-View Source
+View Source
Release Notes
@@ -54,33 +54,33 @@ If you’ve never used the Service Catalog before, make sure to read
### About TLS
-* [How does TLS/SSL work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#how-does-tlsssl-work)
-* [What are commercial or public Certificate Authorities?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#what-are-commercial-or-public-certificate-authorities)
-* [How does Gruntwork generate a TLS cert for private services?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#how-does-gruntwork-generate-a-tls-cert-for-private-services)
+* [How does TLS/SSL work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#how-does-tlsssl-work)
+* [What are commercial or public Certificate Authorities?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#what-are-commercial-or-public-certificate-authorities)
+* [How does Gruntwork generate a TLS cert for private services?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#how-does-gruntwork-generate-a-tls-cert-for-private-services)
### About the scripts specifically
-* [How does create-tls-cert work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#how-does-create-tls-cert-work)
-* [How does download-rds-ca-certs work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#how-does-download-rds-ca-certs-work)
-* [How does generate-trust-stores work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#how-does-generate-trust-stores-work)
+* [How does create-tls-cert work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#how-does-create-tls-cert-work)
+* [How does download-rds-ca-certs work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#how-does-download-rds-ca-certs-work)
+* [How does generate-trust-stores work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#how-does-generate-trust-stores-work)
## Deploy
### Running
-* [How do I run these scripts using Docker?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#how-do-i-run-these-scripts-using-docker)
-* [How do I create self-signed TLS certs?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#how-do-i-create-self-signed-tls-certs)
-* [Should I store certs in AWS Secrets Manager or Amazon Certificate Manager?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#should-i-store-certs-in-aws-secrets-manager-or-amazon-certificate-manager)
-* [Generating self-signed certs for local dev and testing](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-local-dev-and-testing)
-* [Generating self-signed certs for prod, encrypting certs locally with KMS](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-encrypting-certs-locally-with-kms)
-* [Generating self-signed certs for prod, using AWS Secrets Manager for storage](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-using-aws-secrets-manager-for-storage)
-* [Generating self-signed certs for prod, using Amazon Certificate Manager for storage](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-using-amazon-certificate-manager-for-storage)
-* [How do I download CA public keys for validating RDS TLS connections?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#how-do-i-download-CA-public-keys-for-validating-rds-tls-connections)
-* [How do I generate key stores and trust stores to manage TLS certificates for JVM apps?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#how-do-i-generate-key-stores-and-trust-stores-to-manage-tls-certificates-for-jvm-apps)
+* [How do I run these scripts using Docker?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#how-do-i-run-these-scripts-using-docker)
+* [How do I create self-signed TLS certs?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#how-do-i-create-self-signed-tls-certs)
+* [Should I store certs in AWS Secrets Manager or Amazon Certificate Manager?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#should-i-store-certs-in-aws-secrets-manager-or-amazon-certificate-manager)
+* [Generating self-signed certs for local dev and testing](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-local-dev-and-testing)
+* [Generating self-signed certs for prod, encrypting certs locally with KMS](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-encrypting-certs-locally-with-kms)
+* [Generating self-signed certs for prod, using AWS Secrets Manager for storage](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-using-aws-secrets-manager-for-storage)
+* [Generating self-signed certs for prod, using Amazon Certificate Manager for storage](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-using-amazon-certificate-manager-for-storage)
+* [How do I download CA public keys for validating RDS TLS connections?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#how-do-i-download-ca-public-keys-for-validating-rds-tls-connections)
+* [How do I generate key stores and trust stores to manage TLS certificates for JVM apps?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#how-do-i-generate-key-stores-and-trust-stores-to-manage-tls-certificates-for-jvm-apps)
### Testing
-* [How do I test these scripts using Docker?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.10/modules/tls-scripts/core-concepts.md#how-do-i-test-these-scripts-using-docker)
+* [How do I test these scripts using Docker?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.104.12/modules/tls-scripts/core-concepts.md#how-do-i-test-these-scripts-using-docker)
@@ -106,11 +106,11 @@ If you’ve never used the Service Catalog before, make sure to read