
Added support for secrets (#61)

* Added support for secrets

The module now supports an extra parameter `container_secrets`, similar to
`container_envvars`.

Values in this map are treated as SSM parameter names, and the actual value
injected into the container's environment is pulled from SSM.
For this to work, the SSM keys must also be added to `ssm_paths`.

Example:

```
container_secrets = {
  DB_PASSWORD = "myapp/dev/db.password"
}
ssm_paths         = ["myapp/dev"]
```

The example above injects the (decrypted) contents of
`/myapp/dev/db.password` into the environment variable `DB_PASSWORD`.
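
For reference, that parameter can be created along these lines (a sketch with a placeholder value; the new secrets example added in this commit creates exactly this parameter):

```
resource "aws_ssm_parameter" "password" {
  name  = "/myapp/dev/db.password"
  type  = "SecureString"
  value = "example-placeholder" # placeholder for illustration only
}
```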

* Renamed the toggle to enable container secrets

* Dynamically generate the Lambda ZIP file.
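
  The ZIPs listed in .gitignore below are now produced at apply time by the archive provider. A minimal sketch of the mechanism, assuming an illustrative source file name (the real data source lives in the Lambda modules):

```
# Sketch only: package a Lambda source into the ZIP that is now gitignored.
data "archive_file" "lookup" {
  type        = "zip"
  source_file = "${path.module}/lookup.py" # assumed file name for illustration
  output_path = "${path.module}/lookup.zip"
}
```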

* Fix support for docker labels

* Added a hash of secret keys as a Docker label
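
  Secrets can't be tracked by the live task lookup, so the module stores a hash of the secrets map as a synthetic Docker label and compares that instead. Roughly (a sketch; the real merge logic is in the container_definition changes below):

```
variable "container_secrets" {
  default = {}
}

variable "container_docker_labels" {
  default = {}
}

locals {
  # Sketch only: merge a hash of the secrets map into the Docker labels
  # under the synthetic key _airship_secrets_hash.
  docker_labels = "${merge(
    var.container_docker_labels,
    map("_airship_secrets_hash", md5(jsonencode(var.container_secrets))))}"
}
```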

* Fix whitespace

* Updated example to work with the newest version of Airship (and Windows)

* Updated NLB example to serve as a reusable module in other examples

As part of that, I removed the security group rules: they only
replicated the default behaviour of the default SG, but added
brittleness to the step that discovers the NLB's IP addresses.

* Added an example of a service with secrets

* Added documentation
mhvelplund authored and Jamie-BitFlight committed Jun 13, 2019
1 parent f533e1b commit aafb884f97934c317f3b7b6bc9f004d7cc3fae40
@@ -1,2 +1,6 @@
**/*.tfstate*
**/.terraform
**/.terraform

# Dynamically created by data.archive_file
modules/live_task_lookup/lookup.zip
modules/lambda_ecs_task_scheduler/ecs_task_scheduler.zip
@@ -64,7 +64,7 @@ resource "aws_security_group" "lb_sg" {
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags {
tags = {
Name = "load-balancer-sg"
}
}
@@ -92,7 +92,7 @@ resource "aws_security_group" "ecs_service_sg" {
cidr_blocks = ["0.0.0.0/0"]
}
tags {
tags = {
Name = "fargate-ecs-service-sg"
}
}
@@ -16,7 +16,8 @@ data "aws_security_group" "selected" {
}

module "ecs_cluster" {
source = "git::git@github.com:blinkist/terraform-aws-airship-ecs-cluster.git?ref=master"
source = "blinkist/airship-ecs-cluster/aws"
version = "~> 0.5"

name = "${terraform.workspace}-cluster"

@@ -57,40 +58,13 @@ resource "aws_route53_zone" "this" {
name = "some.zonename.com"
}

resource "aws_security_group_rule" "allow_all" {
type = "ingress"
from_port = "${var.echo_port}"
to_port = "${var.echo_port}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${data.aws_security_group.selected.id}"
}

data "aws_network_interface" "nlb" {
depends_on = ["aws_lb.this"]

filter = {
name = "subnet-id"
values = ["${data.aws_subnet.selected.id}"]
}
}

resource "aws_security_group_rule" "allow_ecs" {
type = "ingress"
from_port = "32768"
to_port = "65535"
protocol = "tcp"
cidr_blocks = ["${formatlist("%s/32",sort(distinct(compact(concat(list(""),data.aws_network_interface.nlb.private_ips)))))}"]
security_group_id = "${data.aws_security_group.selected.id}"
}

data "http" "icanhazip" {
url = "http://ipv4.icanhazip.com"
}

resource "aws_security_group_rule" "allow_user" {
type = "ingress"
from_port = "32768"
from_port = "0"
to_port = "65535"
protocol = "tcp"
cidr_blocks = ["${format("%s/%s",trimspace(data.http.icanhazip.body), "32")}"]
@@ -154,18 +128,16 @@ module "nlb_service" {
# load_balancing_type is either "none", "network","application"
load_balancing_type = "network"

lb_arn = "${aws_lb.this.arn}"

cognito_auth_enabled = false
route53_record_type = "ALIAS"

## load_balancing_properties map defines the map for services hooked to a load balancer
load_balancing_properties_route53_zone_id = "${aws_route53_zone.this.zone_id}"
load_balancing_properties_route53_name = "service-web"
load_balancing_properties_route53_custom_name = "service-web"
load_balancing_properties_lb_vpc_id = "${data.aws_vpc.selected.id}"
load_balancing_properties_target_group_port = "${var.echo_port}"
load_balancing_properties_nlb_listener_port = "${var.echo_port}"
load_balancing_properties_deregistration_delay = 0
load_balancing_properties_lb_arn = "${aws_lb.this.arn}"
load_balancing_properties_cognito_auth_enabled = false
load_balancing_properties_route53_record_type = "ALIAS"

# deployment_controller_type sets the deployment type
# ECS for Rolling update, and CODE_DEPLOY for Blue/Green deployment via CodeDeploy
@@ -183,7 +155,7 @@ module "nlb_service" {
}

# Test that create false works
module "nlb_service" {
module "nlb_service_ignored" {
source = "../../"

create = false
@@ -221,25 +193,22 @@ module "nlb_service" {
# load_balancing_type is either "none", "network","application"
load_balancing_type = "network"

lb_arn = "${aws_lb.this.arn}"

cognito_auth_enabled = false
route53_record_type = "ALIAS"

## load_balancing_properties map defines the map for services hooked to a load balancer
load_balancing_properties_route53_zone_id = "${aws_route53_zone.this.zone_id}"
load_balancing_properties_route53_name = "service-web"
load_balancing_properties_route53_custom_name = "service-web"
load_balancing_properties_lb_vpc_id = "${data.aws_vpc.selected.id}"
load_balancing_properties_target_group_port = "${var.echo_port}"
load_balancing_properties_nlb_listener_port = "${var.echo_port}"
load_balancing_properties_deregistration_delay = 0
load_balancing_properties_lb_arn = "${aws_lb.this.arn}"
load_balancing_properties_cognito_auth_enabled = false
load_balancing_properties_route53_record_type = "ALIAS"

# deployment_controller_type sets the deployment type
# ECS for Rolling update, and CODE_DEPLOY for Blue/Green deployment via CodeDeploy
deployment_controller_type = "ECS"

## capacity_properties map defines the capacity properties of the service
capacity_properties = {}
force_bootstrap_container_image = "false"

# Whether to provide access to the supplied kms_keys. If no kms keys are
@@ -29,3 +29,15 @@ output "lb_address" {
output "has_changed" {
value = "${module.nlb_service.has_changed}"
}

output "cluster_id" {
value = "${module.ecs_cluster.cluster_id}"
}

output "zone_id" {
value = "${aws_route53_zone.this.zone_id}"
}

output "lb_arn" {
value = "${aws_lb.this.arn}"
}
@@ -1,5 +1,5 @@
terraform {
required_version = "< 0.12"
required_version = "~> 0.11"
}

provider "aws" {
@@ -8,15 +8,15 @@ provider "aws" {
skip_metadata_api_check = true
skip_region_validation = true
skip_credentials_validation = true
version = "> 1.57"
version = "~> 2.14"
}

variable "region" {
default = "ap-southeast-2"
}

provider "http" {
version = "~> 1.0"
version = "~> 1.1"
}

provider "null" {
@@ -26,3 +26,7 @@ provider "null" {
provider "template" {
version = "~> 2.1"
}

provider "archive" {
version = "~> 1.2"
}
@@ -0,0 +1,8 @@
@ECHO OFF

REM What is this you ask? Someone put a "sleep 30" in the ECS cluster module, which doesn't work on Windows.
REM Having this in the same folder makes Terraform run it transparently on Windows, "fixing" the issue.
REM Full credit goes to Fæster@JPPOL for the hack to fix the hack.

REM Waits %1 -1 seconds ...
ping 127.0.0.1 -n %1
@@ -0,0 +1,76 @@
terraform {
required_version = "~> 0.11"
}

locals {
region = "eu-west-1"
}

provider "aws" {
region = "${local.region}"
skip_get_ec2_platforms = true
skip_metadata_api_check = true
skip_region_validation = true
skip_credentials_validation = true
version = "~> 2.14"
}

locals {
remote_account_id = "${data.aws_caller_identity.current.account_id}"
}

data "aws_caller_identity" "current" {}

data "aws_vpc" "selected" {
default = true
}

resource "aws_ssm_parameter" "user" {
name = "/myapp/dev/db.user"
type = "String"
value = "jdoe"
}

resource "aws_ssm_parameter" "password" {
name = "/myapp/dev/db.password"
type = "SecureString"
value = "CorrectHorseBatteryStaple"
}

module "ecs-base" {
source = "../with_nlb"
region = "${local.region}"
}

module "secret_service" {
source = "../../"

name = "${terraform.workspace}-secrets"
ecs_cluster_id = "${module.ecs-base.cluster_id}"
region = "${local.region}"
bootstrap_container_image = "nginx:stable"
container_cpu = 256
container_memory = 128
container_port = 80
load_balancing_type = "network"
load_balancing_properties_route53_zone_id = "${module.ecs-base.zone_id}"
load_balancing_properties_lb_vpc_id = "${data.aws_vpc.selected.id}"
load_balancing_properties_nlb_listener_port = 80
load_balancing_properties_lb_arn = "${module.ecs-base.lb_arn}"

# Enable container secrets and add two parameters. The first is stored in a "remote" account, the other is stored locally.
container_secrets_enabled = true

container_secrets = {
DB_USER = "arn:aws:ssm:${local.region}:${local.remote_account_id}:parameter/myapp/dev/db.user"
DB_PASSWORD = "/myapp/dev/db.password"
}

# Give the service access to SSM. Note that for remote accounts, you can't control access with ssm_paths
ssm_enabled = true
ssm_paths = ["myapp/dev"]

tags = {
Environment = "${terraform.workspace}"
}
}
@@ -0,0 +1,8 @@
@ECHO OFF

REM What is this you ask? Someone put a "sleep 30" in the ECS cluster module, which doesn't work on Windows.
REM Having this in the same folder makes Terraform run it transparently on Windows, "fixing" the issue.
REM Full credit goes to Fæster@JPPOL for the hack to fix the hack.

REM Waits %1 -1 seconds ...
ping 127.0.0.1 -n %1
@@ -49,6 +49,9 @@ module "iam" {

# In case Fargate is enabled an extra role needs to be added
fargate_enabled = "${var.fargate_enabled}"

# The container uses secrets and needs a task execution role to get access to them
container_secrets_enabled = "${var.container_secrets_enabled}"
}

#
@@ -184,6 +187,7 @@ module "container_definition" {
hostname = "${var.awsvpc_enabled == 1 ? "" : var.name}"

container_envvars = "${var.container_envvars}"
container_secrets = "${var.container_secrets}"

container_docker_labels = "${var.container_docker_labels}"

@@ -269,6 +273,7 @@ module "ecs_task_definition_selector" {
live_aws_ecs_task_definition_memory_reservation = "${module.live_task_lookup.memory_reservation}"
live_aws_ecs_task_definition_environment_json = "${module.live_task_lookup.environment_json}"
live_aws_ecs_task_definition_docker_label_hash = "${module.live_task_lookup.docker_label_hash}"
live_aws_ecs_task_definition_secrets_hash = "${module.live_task_lookup.secrets_hash}"
}

#
@@ -17,6 +17,15 @@ resource "null_resource" "envvars_as_list_of_maps" {
)}"
}

resource "null_resource" "secrets_as_list_of_maps" {
count = "${length(keys(var.container_secrets))}"

triggers = "${map(
"name", "${local.safe_search_replace_string}${element(keys(var.container_secrets), count.index)}",
"valueFrom", "${local.safe_search_replace_string}${element(values(var.container_secrets), count.index)}",
)}"
}

locals {
port_mappings = {
with_port = [
@@ -43,11 +52,13 @@ locals {
command = "${var.container_command}"
workingDirectory = "${var.working_directory}"
readonlyRootFilesystem = "${var.readonly_root_filesystem}"
dockerLabels = "${local.docker_labels}"

privileged = "${var.privileged}"

hostname = "${var.hostname}"
environment = ["${null_resource.envvars_as_list_of_maps.*.triggers}"]
secrets = ["${null_resource.secrets_as_list_of_maps.*.triggers}"]
mountPoints = ["${var.mountpoints}"]
portMappings = "${local.port_mappings[local.use_port]}"
healthCheck = "${var.healthcheck}"
@@ -75,6 +75,16 @@ variable "container_envvars" {
default = {}
}

variable "container_secrets" {
description = <<EOF
A map of environment variables to pass to the container, where each value is an SSM parameter name or ARN.
Each parameter is looked up in SSM and the resulting value is injected into the corresponding environment variable.
EOF

default = {}
}

variable "readonly_root_filesystem" {
description = "Determines whether a container is given read-only access to its root filesystem. Due to how Terraform type casts booleans in json it is required to double quote this value."
default = "false"
@@ -133,9 +143,19 @@ locals {
}
}

# Secrets aren't trackable by the live_task_lookup (thanks AWS!), so we store a hash of them as a synthetic Docker label on the container.
secrets_merge = {
"0" = {}

"1" = {
_airship_secrets_hash = "${md5(jsonencode(var.container_secrets))}"
}
}

docker_labels = "${merge(
var.container_docker_labels,
local.docker_label_merge[signum(length(var.container_docker_labels))])}"
local.docker_label_merge[signum(length(var.container_docker_labels))],
local.secrets_merge[signum(length(var.container_secrets))])}"
}

variable "tags" {
@@ -21,6 +21,7 @@ locals {
local.memory[0] != var.live_aws_ecs_task_definition_memory ||
local.memory_reservation[0] != "${var.live_aws_ecs_task_definition_memory_reservation == "undefined" ? "0" : var.live_aws_ecs_task_definition_memory_reservation}" ||
lookup(local.docker_labels[0],"_airship_dockerlabel_hash","") != var.live_aws_ecs_task_definition_docker_label_hash ||
lookup(local.docker_labels[0],"_airship_secrets_hash","") != var.live_aws_ecs_task_definition_secrets_hash ||
jsonencode(local.environment[0]) != var.live_aws_ecs_task_definition_environment_json }"

# If there is a difference, between the ( newly created) terraform state task definition and the live task definition
@@ -18,3 +18,4 @@ variable "live_aws_ecs_task_definition_memory" {}
variable "live_aws_ecs_task_definition_memory_reservation" {}
variable "live_aws_ecs_task_definition_environment_json" {}
variable "live_aws_ecs_task_definition_docker_label_hash" {}
variable "live_aws_ecs_task_definition_secrets_hash" {}
