diff --git a/examples/vault-cluster-private/main.tf b/examples/vault-cluster-private/main.tf
index 41229919..e984dcab 100644
--- a/examples/vault-cluster-private/main.tf
+++ b/examples/vault-cluster-private/main.tf
@@ -74,11 +74,11 @@ data "template_file" "user_data_vault_cluster" {
 module "security_group_rules" {
   source = "github.com/hashicorp/terraform-aws-consul.git//modules/consul-client-security-group-rules?ref=v0.3.3"
 
-  security_group_id = "${module.vault_cluster.security_group_id}"
+  security_group_id = "${module.vault_cluster.security_group_id}"
 
   # To make testing easier, we allow requests from any IP address here but in a production deployment, we *strongly*
   # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
-
+
   allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
 }
 
@@ -141,4 +141,4 @@ data "aws_subnet_ids" "default" {
   vpc_id = "${data.aws_vpc.default.id}"
 }
 
-data "aws_region" "current" {}
\ No newline at end of file
+data "aws_region" "current" {}
diff --git a/examples/vault-ddb-backend/README.md b/examples/vault-ddb-backend/README.md
new file mode 100644
index 00000000..04a7ca7b
--- /dev/null
+++ b/examples/vault-ddb-backend/README.md
@@ -0,0 +1,43 @@
+# Vault Cluster with DDB backend example
+
+This folder shows an example of Terraform code to deploy a [Vault](https://www.vaultproject.io/) cluster in
+[AWS](https://aws.amazon.com/) using the [vault-cluster module](https://github.com/hashicorp/terraform-aws-vault/tree/master/modules/vault-cluster).
+The Vault cluster uses [DynamoDB](https://aws.amazon.com/dynamodb/) as a high-availability storage backend.
+
+This example creates a Vault cluster spread across the subnets in the default VPC of the AWS account. For an example
+of a Vault cluster that is publicly accessible, see [vault-cluster-public](https://github.com/hashicorp/terraform-aws-vault/tree/master/examples/vault-cluster-public).
+
+![Vault architecture]()
+
+You will need to create an [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html)
+that has Vault installed, or bootstrap Vault upon launch with user data.
+
+For more info on how the Vault cluster works, check out the [vault-cluster](https://github.com/hashicorp/terraform-aws-vault/tree/master/modules/vault-cluster) documentation.
+
+**Note**: To keep this example as simple to deploy and test as possible, it deploys the Vault cluster into your default
+VPC and default subnets, some of which might be publicly accessible. This is OK for learning and experimenting, but for
+production usage, we strongly recommend deploying the Vault cluster into the private subnets of a custom VPC.
+
+## Quick start
+
+To deploy a Vault Cluster:
+
+1. `git clone` this repo to your computer.
+1. Optional: build a Vault AMI. See the [vault-consul-ami example](https://github.com/hashicorp/terraform-aws-vault/tree/master/examples/vault-consul-ami) documentation
+   for instructions on how to build an AMI that has both Vault and Consul installed (note that for this example,
+   you'll only need Vault, but having both won't hurt anything).
+1. Install [Terraform](https://www.terraform.io/).
+1. Open `variables.tf`, set the environment variables specified at the top of the file, and fill in any other variables that
+   don't have a default. If you built a custom AMI, put the AMI ID into the `ami_id` variable. Otherwise, one of our
+   public example AMIs will be used by default. These AMIs are great for learning/experimenting, but are NOT
+   recommended for production use.
+1. Run `terraform init`.
+1. Run `terraform apply`.
+1. Run the [vault-examples-helper.sh script](https://github.com/hashicorp/terraform-aws-vault/tree/master/examples/vault-examples-helper/vault-examples-helper.sh) to
+   print out the IP addresses of the Vault servers and some example commands you can run to interact with the cluster:
+   `../vault-examples-helper/vault-examples-helper.sh`.
+
+To see how to connect to the Vault cluster, initialize it, and start reading and writing secrets, head over to the
+[How do you use the Vault cluster?](https://github.com/hashicorp/terraform-aws-vault/tree/master/modules/vault-cluster#how-do-you-use-the-vault-cluster) docs.
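> **Reviewer note on the generated Vault config.** For orientation before the Terraform code: when the servers boot, the user-data script below passes the table name and region to `run-vault`, which renders a Vault storage stanza roughly like the following. This is a sketch, not verbatim `run-vault` output; the table name is this example's default and the region is illustrative.

```hcl
# Sketch of the storage stanza generated when --enable-dynamo-backend is set.
# ha_enabled lets the Vault nodes coordinate leadership through the DynamoDB
# table itself, so no Consul cluster is needed for HA.
storage "dynamodb" {
  ha_enabled = "true"           # assumption: HA mode on, matching this example's intent
  table      = "my-vault-table" # default of var.dynamo_table_name
  region     = "us-east-1"      # illustrative; the script uses the current AWS region
}
```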
diff --git a/examples/vault-ddb-backend/main.tf b/examples/vault-ddb-backend/main.tf
new file mode 100644
index 00000000..a9b3df74
--- /dev/null
+++ b/examples/vault-ddb-backend/main.tf
@@ -0,0 +1,74 @@
+# ---------------------------------------------------------------------------------------------------------------------
+# DEPLOY A VAULT SERVER CLUSTER WITH DYNAMODB BACKEND IN AWS
+# This is an example of how to use the vault-cluster module to deploy a Vault cluster in AWS. This cluster uses a
+# DynamoDB table, created by the vault-cluster module, as its storage backend.
+# ---------------------------------------------------------------------------------------------------------------------
+
+terraform {
+  required_version = ">= 0.9.3"
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# DEPLOY THE VAULT SERVER CLUSTER
+# ---------------------------------------------------------------------------------------------------------------------
+
+module "vault_cluster" {
+  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
+  # to a specific version of the modules, such as the following example:
+  # source = "github.com/hashicorp/terraform-aws-vault.git//modules/vault-cluster?ref=v0.0.1"
+  source = "../../modules/vault-cluster"
+
+  cluster_name  = "${var.vault_cluster_name}"
+  cluster_size  = "${var.vault_cluster_size}"
+  instance_type = "${var.vault_instance_type}"
+
+  ami_id    = "${var.ami_id}"
+  user_data = "${data.template_file.user_data_vault_cluster.rendered}"
+
+  enable_dynamo_backend = true
+  dynamo_table_name     = "${var.dynamo_table_name}"
+
+  vpc_id     = "${data.aws_vpc.default.id}"
+  subnet_ids = "${data.aws_subnet_ids.default.ids}"
+
+  # To make testing easier, we allow requests from any IP address here but in a production deployment, we *strongly*
+  # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
+
+  allowed_ssh_cidr_blocks              = ["0.0.0.0/0"]
+  allowed_inbound_cidr_blocks          = ["0.0.0.0/0"]
+  allowed_inbound_security_group_ids   = []
+  allowed_inbound_security_group_count = 0
+  ssh_key_name                         = "${var.ssh_key_name}"
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# THE USER DATA SCRIPT THAT WILL RUN ON EACH VAULT SERVER WHEN IT'S BOOTING
+# This script will configure and start Vault
+# ---------------------------------------------------------------------------------------------------------------------
+
+data "template_file" "user_data_vault_cluster" {
+  template = "${file("${path.module}/user-data-vault.sh")}"
+
+  vars {
+    aws_region        = "${data.aws_region.current.name}"
+    dynamo_table_name = "${var.dynamo_table_name}"
+  }
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# DEPLOY THE CLUSTERS IN THE DEFAULT VPC AND AVAILABILITY ZONES
+# Using the default VPC and subnets makes this example easy to run and test, but it means Consul and Vault are
+# accessible from the public Internet. In a production deployment, we strongly recommend deploying into a custom VPC
+# and private subnets.
+# ---------------------------------------------------------------------------------------------------------------------
+
+data "aws_vpc" "default" {
+  default = "${var.vpc_id == "" ? true : false}"
+  id      = "${var.vpc_id}"
+}
+
+data "aws_subnet_ids" "default" {
+  vpc_id = "${data.aws_vpc.default.id}"
+}
+
+data "aws_region" "current" {}
diff --git a/examples/vault-ddb-backend/outputs.tf b/examples/vault-ddb-backend/outputs.tf
new file mode 100644
index 00000000..3f83410d
--- /dev/null
+++ b/examples/vault-ddb-backend/outputs.tf
@@ -0,0 +1,43 @@
+output "asg_name_vault_cluster" {
+  value = "${module.vault_cluster.asg_name}"
+}
+
+output "launch_config_name_vault_cluster" {
+  value = "${module.vault_cluster.launch_config_name}"
+}
+
+output "iam_role_arn_vault_cluster" {
+  value = "${module.vault_cluster.iam_role_arn}"
+}
+
+output "iam_role_id_vault_cluster" {
+  value = "${module.vault_cluster.iam_role_id}"
+}
+
+output "security_group_id_vault_cluster" {
+  value = "${module.vault_cluster.security_group_id}"
+}
+
+output "aws_region" {
+  value = "${data.aws_region.current.name}"
+}
+
+output "vault_servers_cluster_tag_key" {
+  value = "${module.vault_cluster.cluster_tag_key}"
+}
+
+output "vault_servers_cluster_tag_value" {
+  value = "${module.vault_cluster.cluster_tag_value}"
+}
+
+output "ssh_key_name" {
+  value = "${var.ssh_key_name}"
+}
+
+output "vault_cluster_size" {
+  value = "${var.vault_cluster_size}"
+}
+
+output "dynamo_table_arn" {
+  value = "${module.vault_cluster.dynamo_table_arn}"
+}
diff --git a/examples/vault-ddb-backend/user-data-vault.sh b/examples/vault-ddb-backend/user-data-vault.sh
new file mode 100644
index 00000000..e2f4bcca
--- /dev/null
+++ b/examples/vault-ddb-backend/user-data-vault.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the
+# run-vault script to configure and start Vault in server mode. Note that this script assumes it's running in an AMI
+# built from the Packer template in examples/vault-consul-ami/vault-consul.json.
+
+set -e
+
+# Send the log output from this script to user-data.log, syslog, and the console
+# From: https://alestic.com/2010/12/ec2-user-data-output/
+exec > >(tee /var/log/user-data.log | logger -t user-data -s 2>/dev/console) 2>&1
+
+# The Packer template puts the TLS certs in these file paths
+readonly VAULT_TLS_CERT_FILE="/opt/vault/tls/vault.crt.pem"
+readonly VAULT_TLS_KEY_FILE="/opt/vault/tls/vault.key.pem"
+
+# The variables below are filled in via Terraform interpolation
+/opt/vault/bin/run-vault --tls-cert-file "$VAULT_TLS_CERT_FILE" --tls-key-file "$VAULT_TLS_KEY_FILE" --enable-dynamo-backend --dynamo-table "${dynamo_table_name}" --dynamo-region "${aws_region}"
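> **Reviewer note on supplying inputs.** The `variables.tf` below defines this example's inputs. A minimal `terraform.tfvars` for a test deploy might look like the following sketch; all values are placeholders you would replace with your own.

```hcl
# terraform.tfvars -- illustrative values only
ami_id            = "ami-0123456789abcdef0" # your Vault AMI (see examples/vault-consul-ami)
ssh_key_name      = "my-ec2-key-pair"       # an existing EC2 Key Pair in the target region
dynamo_table_name = "my-vault-table"        # the table itself is created by the vault-cluster module
```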
diff --git a/examples/vault-ddb-backend/variables.tf b/examples/vault-ddb-backend/variables.tf
new file mode 100644
index 00000000..97483c9d
--- /dev/null
+++ b/examples/vault-ddb-backend/variables.tf
@@ -0,0 +1,51 @@
+# ---------------------------------------------------------------------------------------------------------------------
+# ENVIRONMENT VARIABLES
+# Define these secrets as environment variables
+# ---------------------------------------------------------------------------------------------------------------------
+
+# AWS_ACCESS_KEY_ID
+# AWS_SECRET_ACCESS_KEY
+# AWS_DEFAULT_REGION
+
+# ---------------------------------------------------------------------------------------------------------------------
+# REQUIRED PARAMETERS
+# You must provide a value for each of these parameters.
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "ami_id" {
+  description = "The ID of the AMI to run in the cluster. This should be an AMI built from the Packer template under examples/vault-consul-ami/vault-consul.json."
+}
+
+variable "ssh_key_name" {
+  description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair."
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# OPTIONAL PARAMETERS
+# These parameters have reasonable defaults.
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "vault_cluster_name" {
+  description = "What to name the Vault server cluster and all of its associated resources"
+  default     = "vault-ddb-example"
+}
+
+variable "vault_cluster_size" {
+  description = "The number of Vault server nodes to deploy. We strongly recommend using 3 or 5."
+  default     = 3
+}
+
+variable "vault_instance_type" {
+  description = "The type of EC2 Instance to run in the Vault ASG"
+  default     = "t2.micro"
+}
+
+variable "vpc_id" {
+  description = "The ID of the VPC to deploy into. Leave an empty string to use the Default VPC in this region."
+  default     = ""
+}
+
+variable "dynamo_table_name" {
+  description = "The name of the DynamoDB table to create and use as a storage backend. Note: Consul will NOT be configured as a backend."
+  default     = "my-vault-table"
+}
diff --git a/examples/vault-s3-backend/main.tf b/examples/vault-s3-backend/main.tf
index b5e9251b..7e0cfaea 100644
--- a/examples/vault-s3-backend/main.tf
+++ b/examples/vault-s3-backend/main.tf
@@ -79,11 +79,11 @@ data "template_file" "user_data_vault_cluster" {
 module "security_group_rules" {
   source = "github.com/hashicorp/terraform-aws-consul.git//modules/consul-client-security-group-rules?ref=v0.3.3"
 
-  security_group_id = "${module.vault_cluster.security_group_id}"
+  security_group_id = "${module.vault_cluster.security_group_id}"
 
   # To make testing easier, we allow requests from any IP address here but in a production deployment, we *strongly*
   # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
-
+
   allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
 }
 
@@ -146,4 +146,4 @@ data "aws_subnet_ids" "default" {
   vpc_id = "${data.aws_vpc.default.id}"
 }
 
-data "aws_region" "current" {}
\ No newline at end of file
+data "aws_region" "current" {}
diff --git a/examples/vault-s3-backend/outputs.tf b/examples/vault-s3-backend/outputs.tf
index e71de3a5..850bce74 100644
--- a/examples/vault-s3-backend/outputs.tf
+++ b/examples/vault-s3-backend/outputs.tf
@@ -84,4 +84,4 @@ output "consul_cluster_cluster_tag_value" {
 
 output "s3_bucket_arn" {
   value = "${module.vault_cluster.s3_bucket_arn}"
-}
\ No newline at end of file
+}
diff --git a/examples/vault-s3-backend/variables.tf b/examples/vault-s3-backend/variables.tf
index ecea70ed..2ed6b2b0 100644
--- a/examples/vault-s3-backend/variables.tf
+++ b/examples/vault-s3-backend/variables.tf
@@ -73,4 +73,4 @@ variable "s3_bucket_name" {
 variable "force_destroy_s3_bucket" {
   description = "If you set this to true, when you run terraform destroy, this tells Terraform to delete all the objects in the S3 bucket used for backend storage (if configured). You should NOT set this to true in production or you risk losing all your data! This property is only here so automated tests of this module can clean up after themselves."
   default     = false
-}
\ No newline at end of file
+}
diff --git a/main.tf b/main.tf
index 245c57c6..851c62fa 100644
--- a/main.tf
+++ b/main.tf
@@ -117,11 +117,11 @@ data "template_file" "user_data_vault_cluster" {
 module "security_group_rules" {
   source = "github.com/hashicorp/terraform-aws-consul.git//modules/consul-client-security-group-rules?ref=v0.3.3"
 
-  security_group_id = "${module.vault_cluster.security_group_id}"
+  security_group_id = "${module.vault_cluster.security_group_id}"
 
   # To make testing easier, we allow requests from any IP address here but in a production deployment, we *strongly*
   # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
-
+
   allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
 }
 
@@ -223,4 +223,4 @@ data "aws_subnet_ids" "default" {
   tags = "${var.subnet_tags}"
 }
 
-data "aws_region" "current" {}
\ No newline at end of file
+data "aws_region" "current" {}
diff --git a/modules/run-vault/README.md b/modules/run-vault/README.md
index 97f03d04..9e14d0fe 100644
--- a/modules/run-vault/README.md
+++ b/modules/run-vault/README.md
@@ -63,9 +63,12 @@ The `run-vault` script accepts the following arguments:
 * `user` (optional): The user to run Vault as. Default is to use the owner of `config-dir`.
 * `skip-vault-config` (optional): If this flag is set, don't generate a Vault configuration file. This is useful if
   you have a custom configuration file and don't want to use any of the default settings from `run-vault`.
-* `--enable-s3-backend` (optional): If this flag is set, an S3 backend will be enabled in addition to the HA Consul backend.
+* `--enable-s3-backend` (optional): Cannot be set with `--enable-dynamo-backend`. If this flag is set, an S3 backend will be enabled in addition to the HA Consul backend.
 * `--s3-bucket` (optional): Specifies the S3 bucket to use to store Vault data. Only used if `--enable-s3-backend` is set.
 * `--s3-bucket-region` (optional): Specifies the AWS region where `--s3-bucket` lives. Only used if `--enable-s3-backend` is set.
+* `--enable-dynamo-backend` (optional): Cannot be set with `--enable-s3-backend`. If this flag is set, a DynamoDB backend will be enabled. Consul will __NOT__ be enabled as a backend.
+* `--dynamo-table` (optional): Specifies the DynamoDB table to use to store Vault data. Only used if `--enable-dynamo-backend` is set.
+* `--dynamo-region` (optional): Specifies the AWS region where `--dynamo-table` lives. Only used if `--enable-dynamo-backend` is set.
 
 Example:
 
@@ -73,12 +76,17 @@ Example:
 /opt/vault/bin/run-vault --tls-cert-file /opt/vault/tls/vault.crt.pem --tls-key-file /opt/vault/tls/vault.key.pem
 ```
 
-Or if you want to enable an S3 backend:
+If you want to enable an S3 backend:
 
 ```
 /opt/vault/bin/run-vault --tls-cert-file /opt/vault/tls/vault.crt.pem --tls-key-file /opt/vault/tls/vault.key.pem --enable-s3-backend --s3-bucket my-vault-bucket --s3-bucket-region us-east-1
 ```
 
+Or if you want to enable a DynamoDB backend:
+
+```
+/opt/vault/bin/run-vault --tls-cert-file /opt/vault/tls/vault.crt.pem --tls-key-file /opt/vault/tls/vault.key.pem --enable-dynamo-backend --dynamo-table my-dynamo-table --dynamo-region us-east-1
+```
 
 ## Vault configuration
 
@@ -134,6 +142,14 @@ available.
   * [region](https://www.vaultproject.io/docs/configuration/storage/s3.html#region): Set to the `--s3-bucket-region`
     parameter.
 
+* [storage](https://www.vaultproject.io/docs/configuration/index.html#storage): Set the `--enable-dynamo-backend` flag to
+  configure DynamoDB as the main (HA) storage backend for Vault:
+
+  * [table](https://www.vaultproject.io/docs/configuration/storage/dynamodb.html#table): Set to the `--dynamo-table`
+    parameter.
+  * [region](https://www.vaultproject.io/docs/configuration/storage/dynamodb.html#region): Set to the `--dynamo-region`
+    parameter.
+
 ### Overriding the configuration
 
 To override the default configuration, simply put your own configuration file in the Vault config folder (default:
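> **Reviewer note on overrides.** To make the override mechanism above concrete, a hypothetical override file dropped into the Vault config folder might look like the sketch below. The file name, path, and values are illustrative; `max_parallel` is an option from Vault's DynamoDB storage docs, shown here only as an example of a setting you might tune beyond what `run-vault` generates.

```hcl
# Hypothetical override file, e.g. /opt/vault/config/my-overrides.hcl.
# Vault loads every file in its config directory, so settings here can
# supplement or adjust the file that run-vault generates.
storage "dynamodb" {
  ha_enabled   = "true"
  table        = "my-dynamo-table"
  region       = "us-east-1"
  max_parallel = "64" # cap concurrent requests to DynamoDB; tune to your table's capacity
}
```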
+ echo -e " --enable-dynamo-backend\tIf this flag is set, DynamoDB will be enabled as the backend storage (HA)" + echo -e " --dynamo-region\tSpecifies the AWS region where --dynamo-table lives. Only used if '--enable-dynamo-backend is on'" + echo -e " --dynamo--table\tSpecifies the DynamoDB table to use for HA Storage. Only used if '--enable-dynamo-backend is on'" echo echo "Examples:" echo @@ -115,6 +118,10 @@ function generate_vault_config { local readonly enable_s3_backend="$8" local readonly s3_bucket="$9" local readonly s3_bucket_region="${10}" + local readonly enable_dynamo_backend="${11}" + local readonly dynamo_region="${12}" + local readonly dynamo_table="${13}" + local readonly config_path="$config_dir/$VAULT_CONFIG_FILE" local instance_ip_address @@ -122,6 +129,8 @@ function generate_vault_config { log_info "Creating default Vault config file in $config_path" local readonly listener_config=$(cat <> "$config_path" echo -e "$s3_config" >> "$config_path" echo -e "$consul_storage" >> "$config_path" + echo -e "$vault_storage" >> "$config_path" chown "$user:$user" "$config_path" } @@ -215,6 +248,9 @@ function run { local enable_s3_backend="false" local s3_bucket="" local s3_bucket_region="" + local enable_dynamo_backend="false" + local dynamo_region="" + local dynamo_table="" local all_args=() while [[ $# > 0 ]]; do @@ -283,6 +319,17 @@ function run { s3_bucket_region="$2" shift ;; + --enable-dynamo-backend) + enable_dynamo_backend="true" + ;; + --dynamo-region) + dynamo_region="$2" + shift + ;; + --dynamo-table) + dynamo_table="$2" + shift + ;; --help) print_usage exit @@ -305,6 +352,11 @@ function run { assert_not_empty "--s3-bucket-region" "$s3_bucket_region" fi + if [[ "$enable_dynamo_backend" == "true" ]]; then + assert_not_empty "--dynamo-table" "$dynamo_table" + assert_not_empty "--dynamo-region" "$dynamo_region" + fi + assert_is_installed "supervisorctl" assert_is_installed "aws" assert_is_installed "curl" @@ -337,7 +389,7 @@ function run { if [[ "$skip_vault_config" == "true" ]]; then log_info "The --skip-vault-config flag is set, so will not generate a default Vault config file." else - generate_vault_config "$tls_cert_file" "$tls_key_file" "$port" "$cluster_port" "$api_addr" "$config_dir" "$user" "$enable_s3_backend" "$s3_bucket" "$s3_bucket_region" + generate_vault_config "$tls_cert_file" "$tls_key_file" "$port" "$cluster_port" "$api_addr" "$config_dir" "$user" "$enable_s3_backend" "$s3_bucket" "$s3_bucket_region" "$enable_dynamo_backend" "$dynamo_region" "$dynamo_table" fi generate_supervisor_config "$SUPERVISOR_CONFIG_PATH" "$config_dir" "$bin_dir" "$log_dir" "$log_level" "$user" diff --git a/modules/vault-cluster/main.tf b/modules/vault-cluster/main.tf index 2c4b698f..500cd5cb 100644 --- a/modules/vault-cluster/main.tf +++ b/modules/vault-cluster/main.tf @@ -194,6 +194,30 @@ resource "aws_s3_bucket" "vault_storage" { } } +resource "aws_dynamodb_table" "vault_dynamo" { + count = "${var.enable_dynamo_backend ? 1 : 0}" + name = "${var.dynamo_table_name}" + hash_key = "Path" + range_key = "Key" + read_capacity = "${var.dynamo_read_capacity}" #Defaults to 5 + write_capacity = "${var.dynamo_write_capacity}" #Defaults to 5 + + attribute { + name = "Path" + type = "S" + } + + attribute { + name = "Key" + type = "S" + } + + tags { + Name = "${var.cluster_name}" + Description = "Used for HA storage with Vault." + } +} + resource "aws_iam_role_policy" "vault_s3" { count = "${var.enable_s3_backend ? 
diff --git a/modules/vault-cluster/main.tf b/modules/vault-cluster/main.tf
index 2c4b698f..500cd5cb 100644
--- a/modules/vault-cluster/main.tf
+++ b/modules/vault-cluster/main.tf
@@ -194,6 +194,30 @@ resource "aws_s3_bucket" "vault_storage" {
   }
 }
 
+resource "aws_dynamodb_table" "vault_dynamo" {
+  count          = "${var.enable_dynamo_backend ? 1 : 0}"
+  name           = "${var.dynamo_table_name}"
+  hash_key       = "Path"
+  range_key      = "Key"
+  read_capacity  = "${var.dynamo_read_capacity}"  # Defaults to 5
+  write_capacity = "${var.dynamo_write_capacity}" # Defaults to 5
+
+  attribute {
+    name = "Path"
+    type = "S"
+  }
+
+  attribute {
+    name = "Key"
+    type = "S"
+  }
+
+  tags {
+    Name        = "${var.cluster_name}"
+    Description = "Used for HA storage with Vault."
+  }
+}
+
 resource "aws_iam_role_policy" "vault_s3" {
   count = "${var.enable_s3_backend ? 1 : 0}"
   name  = "vault_s3"
@@ -201,8 +225,16 @@ resource "aws_iam_role_policy" "vault_s3" {
   policy = "${element(concat(data.aws_iam_policy_document.vault_s3.*.json, list("")), 0)}"
 }
 
+resource "aws_iam_role_policy" "vault_dynamo" {
+  count  = "${var.enable_dynamo_backend ? 1 : 0}"
+  name   = "vault_dynamo"
+  role   = "${aws_iam_role.instance_role.id}"
+  policy = "${element(concat(data.aws_iam_policy_document.vault_dynamo.*.json, list("")), 0)}"
+}
+
 data "aws_iam_policy_document" "vault_s3" {
-  count = "${var.enable_s3_backend ? 1 : 0}"
+  count = "${var.enable_s3_backend ? 1 : 0}"
+
   statement {
     effect  = "Allow"
     actions = ["s3:*"]
@@ -213,3 +245,16 @@ data "aws_iam_policy_document" "vault_s3" {
     ]
   }
 }
+
+data "aws_iam_policy_document" "vault_dynamo" {
+  count = "${var.enable_dynamo_backend ? 1 : 0}"
+
+  statement {
+    effect  = "Allow"
+    actions = ["dynamodb:*"]
+
+    resources = [
+      "${aws_dynamodb_table.vault_dynamo.arn}",
+    ]
+  }
+}
diff --git a/modules/vault-cluster/outputs.tf b/modules/vault-cluster/outputs.tf
index 1acf4b26..7701dd69 100644
--- a/modules/vault-cluster/outputs.tf
+++ b/modules/vault-cluster/outputs.tf
@@ -32,4 +32,8 @@ output "security_group_id" {
 
 output "s3_bucket_arn" {
   value = "${join(",", aws_s3_bucket.vault_storage.*.arn)}"
-}
\ No newline at end of file
+}
+
+output "dynamo_table_arn" {
+  value = "${element(concat(aws_dynamodb_table.vault_dynamo.*.arn, list("")), 0)}"
+}
diff --git a/modules/vault-cluster/variables.tf b/modules/vault-cluster/variables.tf
index 4533011f..0bcda701 100644
--- a/modules/vault-cluster/variables.tf
+++ b/modules/vault-cluster/variables.tf
@@ -176,12 +176,32 @@ variable "enable_s3_backend" {
   default     = false
 }
 
+variable "enable_dynamo_backend" {
+  description = "Whether to configure a DynamoDB storage backend. If set to true, Consul will NOT be used as a backend."
+  default     = false
+}
+
 variable "s3_bucket_name" {
   description = "The name of the S3 bucket to create and use as a storage backend. Only used if 'enable_s3_backend' is set to true."
   default     = ""
 }
 
+variable "dynamo_table_name" {
+  description = "The name of the DynamoDB table to create and use as a storage backend. Only used if 'enable_dynamo_backend' is set to true."
+  default     = ""
+}
+
 variable "force_destroy_s3_bucket" {
   description = "If 'configure_s3_backend' is enabled and you set this to true, when you run terraform destroy, this tells Terraform to delete all the objects in the S3 bucket used for backend storage. You should NOT set this to true in production or you risk losing all your data! This property is only here so automated tests of this module can clean up after themselves. Only used if 'enable_s3_backend' is set to true."
   default     = false
 }
+
+variable "dynamo_read_capacity" {
+  description = "Sets the DynamoDB read capacity for the storage backend table. Only used if 'enable_dynamo_backend' is set to true."
+  default     = 5
+}
+
+variable "dynamo_write_capacity" {
+  description = "Sets the DynamoDB write capacity for the storage backend table. Only used if 'enable_dynamo_backend' is set to true."
+  default     = 5
+}
diff --git a/modules/vault-elb/main.tf b/modules/vault-elb/main.tf
index 1d6b39aa..502acc45 100644
--- a/modules/vault-elb/main.tf
+++ b/modules/vault-elb/main.tf
@@ -19,8 +19,8 @@ resource "aws_elb" "vault" {
   connection_draining         = "${var.connection_draining}"
   connection_draining_timeout = "${var.connection_draining_timeout}"
 
-  security_groups = ["${aws_security_group.vault.id}"]
-  subnets = ["${var.subnet_ids}"]
+  security_groups = ["${aws_security_group.vault.id}"]
+  subnets         = ["${var.subnet_ids}"]
 
   # Run the ELB in TCP passthrough mode
   listener {