Skip to content
This repository has been archived by the owner on Jan 25, 2023. It is now read-only.

Commit

Permalink
Merge pull request #232 from anouarchattouna/fixing_tests_using_consul_for_dns
Browse files Browse the repository at this point in the history

Fixing tests using consul for dns
  • Loading branch information
brikis98 committed Feb 16, 2021
2 parents a0bc093 + c3bd168 commit f1ac700
Show file tree
Hide file tree
Showing 15 changed files with 234 additions and 45 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/gruntwork-io/pre-commit
rev: v0.1.10
rev: v0.1.12
hooks:
- id: terraform-fmt
- id: gofmt
6 changes: 3 additions & 3 deletions examples/vault-consul-ami/vault-consul.json
@@ -1,10 +1,10 @@
{
"min_packer_version": "0.12.0",
"min_packer_version": "1.5.4",
"variables": {
"aws_region": "us-east-1",
"vault_version": "1.5.4",
"vault_version": "1.6.1",
"consul_module_version": "v0.8.0",
"consul_version": "1.5.3",
"consul_version": "1.9.2",
"consul_download_url": "{{env `CONSUL_DOWNLOAD_URL`}}",
"vault_download_url": "{{env `VAULT_DOWNLOAD_URL`}}",
"install_auth_signing_script": "true",
Expand Down
18 changes: 17 additions & 1 deletion examples/vault-dynamodb-backend/main.tf
Expand Up @@ -9,7 +9,7 @@ terraform {
}

# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY THE VAULT SERVER CLUSTER
# DEPLOY THE DYNAMODB STORAGE BACKEND
# ---------------------------------------------------------------------------------------------------------------------

module "backend" {
Expand All @@ -19,6 +19,10 @@ module "backend" {
write_capacity = var.dynamo_write_capacity
}

# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY THE VAULT SERVER CLUSTER
# ---------------------------------------------------------------------------------------------------------------------

module "vault_cluster" {
# When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
# to a specific version of the modules, such as the following example:
Expand All @@ -32,6 +36,11 @@ module "vault_cluster" {
ami_id = var.ami_id
user_data = data.template_file.user_data_vault_cluster.rendered

# Enable S3 storage backend
enable_s3_backend = true
s3_bucket_name = var.s3_bucket_name
force_destroy_s3_bucket = var.force_destroy_s3_bucket

vpc_id = data.aws_vpc.default.id
subnet_ids = data.aws_subnet_ids.default.ids

Expand All @@ -44,16 +53,23 @@ module "vault_cluster" {
allowed_inbound_security_group_count = 0
ssh_key_name = var.ssh_key_name

# Enable DynamoDB high availability storage backend
enable_dynamo_backend = true
dynamo_table_name = var.dynamo_table_name
}

# ---------------------------------------------------------------------------------------------------------------------
# THE USER DATA SCRIPT THAT WILL RUN ON EACH VAULT SERVER WHEN IT'S BOOTING
# This script will configure and start Vault
# ---------------------------------------------------------------------------------------------------------------------

data "template_file" "user_data_vault_cluster" {
template = file("${path.module}/user-data-vault.sh")

vars = {
aws_region = data.aws_region.current.name
dynamo_table_name = var.dynamo_table_name
s3_bucket_name = var.s3_bucket_name
}
}

Expand Down
5 changes: 4 additions & 1 deletion examples/vault-dynamodb-backend/user-data-vault.sh
Expand Up @@ -20,4 +20,7 @@ readonly VAULT_TLS_KEY_FILE="/opt/vault/tls/vault.key.pem"
--dynamo-table "${dynamo_table_name}" \
--dynamo-region "${aws_region}" \
--tls-cert-file "$VAULT_TLS_CERT_FILE" \
--tls-key-file "$VAULT_TLS_KEY_FILE"
--tls-key-file "$VAULT_TLS_KEY_FILE" \
--enable-s3-backend \
--s3-bucket "${s3_bucket_name}" \
--s3-bucket-region "${aws_region}"
12 changes: 12 additions & 0 deletions examples/vault-dynamodb-backend/variables.tf
Expand Up @@ -65,3 +65,15 @@ variable "dynamo_write_capacity" {
description = "Sets the DynamoDB write capacity for storage backend"
default = 5
}

variable "s3_bucket_name" {
description = "The name of an S3 bucket to create and use as a storage backend (if configured). Note: S3 bucket names must be *globally* unique."
type = string
default = "my-vault-bucket"
}

variable "force_destroy_s3_bucket" {
description = "If you set this to true, when you run terraform destroy, this tells Terraform to delete all the objects in the S3 bucket used for backend storage (if configured). You should NOT set this to true in production or you risk losing all your data! This property is only here so automated tests of this module can clean up after themselves."
type = bool
default = false
}
1 change: 0 additions & 1 deletion examples/vault-s3-backend/variables.tf
Expand Up @@ -86,4 +86,3 @@ variable "force_destroy_s3_bucket" {
type = bool
default = false
}

40 changes: 32 additions & 8 deletions modules/run-vault/run-vault
Expand Up @@ -14,6 +14,8 @@ readonly DEFAULT_AGENT_AUTH_MOUNT_PATH="auth/aws"
readonly DEFAULT_PORT=8200
readonly DEFAULT_LOG_LEVEL="info"

readonly DEFAULT_CONSUL_AGENT_SERVICE_REGISTRATION_ADDRESS="localhost:8500"

readonly EC2_INSTANCE_METADATA_URL="http://169.254.169.254/latest/meta-data"

readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
Expand Down Expand Up @@ -44,6 +46,7 @@ function print_usage {
echo -e " --s3-bucket\tSpecifies the S3 bucket to use to store Vault data. Only used if '--enable-s3-backend' is set."
echo -e " --s3-bucket-path\tSpecifies the S3 bucket path to use to store Vault data. Only used if '--enable-s3-backend' is set."
echo -e " --s3-bucket-region\tSpecifies the AWS region where '--s3-bucket' lives. Only used if '--enable-s3-backend' is set."
echo -e " --consul-agent-service-registration-address\tSpecifies the address of the Consul agent to communicate with when using a different storage backend, in this case an S3 backend. Only used if '--enable-s3-backend' is set. Default is ${DEFAULT_CONSUL_AGENT_SERVICE_REGISTRATION_ADDRESS}."
echo -e " --enable-dynamo-backend\tIf this flag is set, DynamoDB will be enabled as the backend storage (HA)"
echo -e " --dynamo-region\tSpecifies the AWS region where --dynamo-table lives. Only used if '--enable-dynamo-backend is on'"
echo -e " --dynamo--table\tSpecifies the DynamoDB table to use for HA Storage. Only used if '--enable-dynamo-backend is on'"
Expand Down Expand Up @@ -237,13 +240,14 @@ function generate_vault_config {
local -r s3_bucket="$9"
local -r s3_bucket_path="${10}"
local -r s3_bucket_region="${11}"
local -r enable_dynamo_backend="${12}"
local -r dynamo_region="${13}"
local -r dynamo_table="${14}"
local -r enable_auto_unseal="${15}"
local -r auto_unseal_kms_key_id="${16}"
local -r auto_unseal_kms_key_region="${17}"
local -r auto_unseal_endpoint="${18}"
local -r consul_agent_service_registration_address="${12}"
local -r enable_dynamo_backend="${13}"
local -r dynamo_region="${14}"
local -r dynamo_table="${15}"
local -r enable_auto_unseal="${16}"
local -r auto_unseal_kms_key_id="${17}"
local -r auto_unseal_kms_key_region="${18}"
local -r auto_unseal_endpoint="${19}"
local -r config_path="$config_dir/$VAULT_CONFIG_FILE"

local instance_ip_address
Expand Down Expand Up @@ -288,6 +292,7 @@ EOF
local dynamodb_storage_type="storage"
local s3_config=""
local vault_storage_backend=""
local service_registration=""
if [[ "$enable_s3_backend" == "true" ]]; then
s3_config=$(cat <<EOF
storage "s3" {
Expand All @@ -299,9 +304,14 @@ EOF
)
consul_storage_type="ha_storage"
dynamodb_storage_type="ha_storage"
service_registration=$(cat <<EOF
service_registration "consul" {
address = "${consul_agent_service_registration_address}"
}\n
EOF
)
fi


if [[ "$enable_dynamo_backend" == "true" ]]; then
vault_storage_backend=$(cat <<EOF
$dynamodb_storage_type "dynamodb" {
Expand Down Expand Up @@ -335,6 +345,7 @@ EOF
echo -e "$listener_config" >> "$config_path"
echo -e "$s3_config" >> "$config_path"
echo -e "$vault_storage_backend" >> "$config_path"
echo -e "$service_registration" >> "$config_path"

chown "$user:$user" "$config_path"
}
Expand Down Expand Up @@ -368,6 +379,8 @@ Documentation=https://www.vaultproject.io/docs/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=$config_path
StartLimitIntervalSec=60
StartLimitBurst=3
EOF
)
Expand All @@ -392,9 +405,12 @@ KillSignal=SIGINT
Restart=on-failure
RestartSec=5
TimeoutStopSec=30
StartLimitInterval=60
StartLimitIntervalSec=60
StartLimitBurst=3
LimitNOFILE=65536
LimitMEMLOCK=infinity
EOF
)

Expand Down Expand Up @@ -449,6 +465,7 @@ function run {
local s3_bucket=""
local s3_bucket_path=""
local s3_bucket_region=""
local consul_agent_service_registration_address="${DEFAULT_CONSUL_AGENT_SERVICE_REGISTRATION_ADDRESS}"
local enable_dynamo_backend="false"
local dynamo_region=""
local dynamo_table=""
Expand Down Expand Up @@ -547,6 +564,11 @@ function run {
s3_bucket_region="$2"
shift
;;
--consul-agent-service-registration-address)
assert_not_empty "$key" "$2"
consul_agent_service_registration_address="$2"
shift
;;
--enable-dynamo-backend)
enable_dynamo_backend="true"
;;
Expand Down Expand Up @@ -639,6 +661,7 @@ function run {
if [[ "$enable_s3_backend" == "true" ]]; then
assert_not_empty "--s3-bucket" "$s3_bucket"
assert_not_empty "--s3-bucket-region" "$s3_bucket_region"
assert_not_empty "--consul-agent-service-registration-address" "${consul_agent_service_registration_address}"
fi
fi

Expand Down Expand Up @@ -714,6 +737,7 @@ function run {
"$s3_bucket" \
"$s3_bucket_path" \
"$s3_bucket_region" \
"${consul_agent_service_registration_address}" \
"$enable_dynamo_backend" \
"$dynamo_region" \
"$dynamo_table" \
Expand Down
3 changes: 3 additions & 0 deletions test/go.sum
Expand Up @@ -197,6 +197,8 @@ github.com/gruntwork-io/gruntwork-cli v0.5.1 h1:mVmVsFubUSLSCO8bGigI63HXzvzkC0uW
github.com/gruntwork-io/gruntwork-cli v0.5.1/go.mod h1:IBX21bESC1/LGoV7jhXKUnTQTZgQ6dYRsoj/VqxUSZQ=
github.com/gruntwork-io/terratest v0.28.15 h1:in1DRBq8/RjxMyb6Amr1SRrczOK/hGnPi+gQXOOtbZI=
github.com/gruntwork-io/terratest v0.28.15/go.mod h1:PkVylPuUNmItkfOTwSiFreYA4FkanK8AluBuNeGxQOw=
github.com/gruntwork-io/terratest v0.32.1 h1:Uho3H7VWD4tEulWov7pWW90V3XATLKxSh88AtrxTYvU=
github.com/gruntwork-io/terratest v0.32.3 h1:GSe/mkSQe0rD7Z92NKTUjDKg2FBuy0w82Ttd5gcK7kU=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
Expand Down Expand Up @@ -362,6 +364,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
Expand Down
8 changes: 5 additions & 3 deletions test/vault_cluster_dynamodb_backend_test.go
Expand Up @@ -20,7 +20,6 @@ const VAR_DYNAMO_TABLE_NAME = "dynamo_table_name"
// 3. Deploy that AMI using the example Terraform code
// 4. SSH to a Vault node and initialize the Vault cluster
// 5. SSH to each Vault node and unseal it
// 6. Connect to the Vault cluster via the ELB
func runVaultWithDynamoBackendClusterTest(t *testing.T, amiId string, awsRegion, sshUserName string) {
examplesDir := test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, VAULT_CLUSTER_DYNAMODB_BACKEND_PATH)

Expand All @@ -36,10 +35,13 @@ func runVaultWithDynamoBackendClusterTest(t *testing.T, amiId string, awsRegion,
})

test_structure.RunTestStage(t, "deploy", func() {
uniqueId := random.UniqueId()
terraformVars := map[string]interface{}{
VAR_DYNAMO_TABLE_NAME: fmt.Sprintf("vault-dynamo-test-%s", random.UniqueId()),
VAR_DYNAMO_TABLE_NAME: fmt.Sprintf("vault-dynamo-test-%s", uniqueId),
VAR_S3_BUCKET_NAME: s3BucketName(uniqueId),
VAR_FORCE_DESTROY_S3_BUCKET: true,
}
deployCluster(t, amiId, awsRegion, examplesDir, random.UniqueId(), terraformVars)
deployCluster(t, amiId, awsRegion, examplesDir, uniqueId, terraformVars)
})

test_structure.RunTestStage(t, "validate", func() {
Expand Down
9 changes: 8 additions & 1 deletion test/vault_cluster_enterprise_test.go
Expand Up @@ -59,11 +59,18 @@ func runVaultEnterpriseClusterTest(t *testing.T, amiId string, awsRegion string,
deployCluster(t, amiId, awsRegion, examplesDir, uniqueId, terraformVars)
})

test_structure.RunTestStage(t, "initialize_unseal", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
})

test_structure.RunTestStage(t, "validate", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

cluster := initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
cluster := getInitializedAndUnsealedVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
testVaultUsesConsulForDns(t, cluster)
checkEnterpriseInstall(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
})
Expand Down
11 changes: 9 additions & 2 deletions test/vault_cluster_private_test.go
Expand Up @@ -39,14 +39,21 @@ func runVaultPrivateClusterTest(t *testing.T, amiId string, awsRegion string, ss
VAR_CONSUL_CLUSTER_NAME: fmt.Sprintf("consul-test-%s", uniqueId),
VAR_CONSUL_CLUSTER_TAG_KEY: fmt.Sprintf("consul-test-%s", uniqueId),
}
deployCluster(t, amiId, awsRegion, examplesDir, random.UniqueId(), terraformVars)
deployCluster(t, amiId, awsRegion, examplesDir, uniqueId, terraformVars)
})

test_structure.RunTestStage(t, "initialize_unseal", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
})

test_structure.RunTestStage(t, "validate", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

cluster := initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
cluster := getInitializedAndUnsealedVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
testVaultUsesConsulForDns(t, cluster)
})
}
13 changes: 11 additions & 2 deletions test/vault_cluster_public_test.go
Expand Up @@ -23,6 +23,7 @@ const VAULT_CLUSTER_PUBLIC_VAR_VAULT_DOMAIN_NAME = "vault_domain_name"
// 4. SSH to a Vault node and initialize the Vault cluster
// 5. SSH to each Vault node and unseal it
// 6. Connect to the Vault cluster via the ELB
// 7. SSH to a Vault node and make sure you can communicate with the nodes via Consul-managed DNS
func runVaultPublicClusterTest(t *testing.T, amiId string, awsRegion string, sshUserName string) {
examplesDir := test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, ".")

Expand All @@ -46,14 +47,22 @@ func runVaultPublicClusterTest(t *testing.T, amiId string, awsRegion string, ssh
VAR_CONSUL_CLUSTER_NAME: fmt.Sprintf("consul-test-%s", uniqueId),
VAR_CONSUL_CLUSTER_TAG_KEY: fmt.Sprintf("consul-test-%s", uniqueId),
}
deployCluster(t, amiId, awsRegion, examplesDir, random.UniqueId(), terraformVars)
deployCluster(t, amiId, awsRegion, examplesDir, uniqueId, terraformVars)
})

test_structure.RunTestStage(t, "validate", func() {
test_structure.RunTestStage(t, "initialize_unseal", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
})

test_structure.RunTestStage(t, "validate", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

cluster := getInitializedAndUnsealedVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
testVaultViaElb(t, terraformOptions)
testVaultUsesConsulForDns(t, cluster)
})
}
16 changes: 9 additions & 7 deletions test/vault_cluster_s3_backend_test.go
Expand Up @@ -21,7 +21,7 @@ const VAR_FORCE_DESTROY_S3_BUCKET = "force_destroy_s3_bucket"
// 3. Deploy that AMI using the example Terraform code
// 4. SSH to a Vault node and initialize the Vault cluster
// 5. SSH to each Vault node and unseal it
// 6. Connect to the Vault cluster via the ELB
// 6. SSH to a Vault node and make sure you can communicate with the nodes via Consul-managed DNS
func runVaultWithS3BackendClusterTest(t *testing.T, amiId string, awsRegion, sshUserName string) {
examplesDir := test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, VAULT_CLUSTER_S3_BACKEND_PATH)

Expand All @@ -47,16 +47,18 @@ func runVaultWithS3BackendClusterTest(t *testing.T, amiId string, awsRegion, ssh
deployCluster(t, amiId, awsRegion, examplesDir, uniqueId, terraformVars)
})

test_structure.RunTestStage(t, "validate", func() {
test_structure.RunTestStage(t, "initialize_unseal", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
})

test_structure.RunTestStage(t, "validate", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

// TODO: temporarily disable DNS check until https://github.com/hashicorp/terraform-aws-consul/issues/155 is
// fixed. See https://github.com/hashicorp/terraform-aws-vault/pull/222 for details.
//
// cluster := initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, nil)
// testVaultUsesConsulForDns(t, cluster)
cluster := getInitializedAndUnsealedVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
testVaultUsesConsulForDns(t, cluster)
})
}

0 comments on commit f1ac700

Please sign in to comment.