diff --git a/README.rst b/README.rst index 68ec2ab..364dea6 100644 --- a/README.rst +++ b/README.rst @@ -1,3 +1,4 @@ +# flake8: noqa .. image:: https://travis-ci.org/ministryofjustice/bootstrap-cfn.svg :target: https://travis-ci.org/ministryofjustice/bootstrap-cfn @@ -444,6 +445,22 @@ It is possilbe to define a custom health check for an ELB like follows ELB Certificates ++++++++++++++++ +ACM +~~~ + +This section defines certificates for the AWS Certificate Manager. For verification, these will require the setting up of SES for the ValidationDomain so that emails to admin@address.com can be received. + +.. code:: yaml + + acm: + my-cert: + domain: helloworld.test.dsd.io # (required) The domain name or wildcard the certificate should cover + validation_domain: dsd.io # (optional) The domain name the verification email should go to. The default is dsd.io + + +Manual SSL +~~~~~~~~~~ + +The SSL certificate will be uploaded before the stack is created and removed after it is deleted. To update the SSL certificate on ELB listeners run the fab task below, this uploads and updates the certificate on each HTTPS listener on your ELBs, by default the old certificate is deleted. 
diff --git a/bootstrap_cfn/config.py b/bootstrap_cfn/config.py index 86a9b70..afc0f01 100644 --- a/bootstrap_cfn/config.py +++ b/bootstrap_cfn/config.py @@ -12,6 +12,7 @@ from troposphere import Base64, FindInMap, GetAZs, GetAtt, Join, Output, Ref, Tags, Template from troposphere.autoscaling import AutoScalingGroup, BlockDeviceMapping, \ EBSBlockDevice, LaunchConfiguration, Tag +from troposphere.certificatemanager import Certificate, DomainValidationOption from troposphere.ec2 import InternetGateway, Route, RouteTable, SecurityGroup, \ SecurityGroupIngress, Subnet, SubnetRouteTableAssociation, VPC, \ VPCGatewayAttachment @@ -155,7 +156,6 @@ def process(self): template, sort_keys=True, indent=None, separators=(',', ': ')) def base_template(self): - from bootstrap_cfn import vpc t = Template() # Get the OS specific data @@ -430,7 +430,7 @@ def create_s3_bucket(self, bucket_config, template): map(template.add_resource, [bucket, bucket_policy]) def ssl(self): - return self.data['ssl'] + return self.data.get('ssl', {}) def rds(self, template): """ @@ -730,46 +730,14 @@ def elb(self, template): for listener in load_balancer.Listeners: if listener['Protocol'] == 'HTTPS': - try: - cert_name = elb['certificate_name'] - except KeyError: - raise errors.CfnConfigError( - "HTTPS listener but no certificate_name specified") - try: - self.ssl()[cert_name]['cert'] - self.ssl()[cert_name]['key'] - except KeyError: - raise errors.CfnConfigError( - "Couldn't find ssl cert {0} in config file".format(cert_name)) - - listener["SSLCertificateId"] = Join("", [ - "arn:aws:iam::", - Ref("AWS::AccountId"), - ":server-certificate/", - "{0}-{1}".format(cert_name, self.stack_name)] - ) + listener["SSLCertificateId"] = self._get_ssl_certificate(template, elb.get('certificate_name', None)) # if not present, add the default cipher policy if 'PolicyNames' not in listener: logging.debug( "ELB Listener for port 443 has no SSL Policy. 
" + "Using default ELBSecurityPolicy-2015-05") listener['PolicyNames'] = ['PinDownSSLNegotiationPolicy201505'] - """ - # Get all the listeners policy names and setup the policies they refer to - for policy_name in listener.get('PolicyNames', []): - matched_policies = [custom_policy for custom_policy in elb_policies - if custom_policy.PolicyName == policy_name] - assert(len(matched_policies) == 1) - matched_policy = matched_policies[0] - # Get the current ports defined in the troposphere policies config and append - # the listers ports - updated_instance_ports = matched_policy.properties.get('InstancePorts', []) - updated_instance_ports.append("{}".format(listener['InstancePort'])) - matched_policy.properties['InstancePorts'] = updated_instance_ports - updated_instance_ports = matched_policy.properties.get('LoadBalancerPorts', []) - updated_instance_ports.append("{}".format(listener['LoadBalancerPort'])) - matched_policy.properties['LoadBalancerPorts'] = updated_instance_ports - """ + elb_list.append(load_balancer) dns_record = RecordSetGroup( @@ -1109,9 +1077,71 @@ def ec2(self): return resources + def _get_ssl_certificate(self, template, certificate_name): + # Create a certificate if required, first try to get an ACM certificate, + # else look for a manual SSL entry. 
+ if not certificate_name: + raise errors.CfnConfigError("Certificate name {} is invalid.".format(certificate_name)) + + acm_certificate = self._get_acm_certificate(certificate_name) + if acm_certificate: + logging.info("config::_get_ssl_certificate: Found ACM certificate.") + template.add_resource(acm_certificate) + return Ref(acm_certificate) + + ssl_certificate = self._get_manual_ssl_certificate(certificate_name) + if ssl_certificate: + logging.info("config::_get_ssl_certificate: Found manual SSL certificate.") + return ssl_certificate + + raise errors.CfnConfigError("Couldn't find ACM or manual SSL certificate configuration " + "{0} in config file".format(certificate_name)) + + def _get_acm_certificate(self, certificate_name): + acm_data = self.data.get('acm', {}).get(certificate_name, None) + if not acm_data: + logging.error("config::_get_acm_certificate: Could not find ACM configuration for {}" + .format(certificate_name)) + return None + logging.info("config::_get_acm_certificate: Creating certificate {} for domain {}" + .format(certificate_name, acm_data.get('domain'))) + certificate = Certificate( + certificate_name, + DomainName=acm_data.get('domain'), + DomainValidationOptions=[ + DomainValidationOption( + DomainName=acm_data.get('domain'), + ValidationDomain=acm_data.get('validation_domain', 'dsd.io'), + ), + ], + Tags=[{'Key': key, 'Value': value} for key, value in acm_data.get('tags', {}).iteritems()] + ) + return certificate + + def _get_manual_ssl_certificate(self, certificate_name): + if self.ssl().get(certificate_name, {}).get('cert', None) is None: + logging.error("config::_get_manual_ssl_certificate: No cert information found for {}" + .format(certificate_name)) + return None + if self.ssl().get(certificate_name, {}).get('key', None) is None: + logging.error("config::_get_manual_ssl_certificate: No key information found for {}" + .format(certificate_name)) + return None + + certificate = Join( + "", + [ + "arn:aws:iam::", + 
Ref("AWS::AccountId"), + ":server-certificate/", + "{0}-{1}".format(certificate_name, self.stack_name) + ] + ) + return certificate + @classmethod def _find_resources(cls, template, resource_type): - f = lambda x: x.resource_type == resource_type + def f(x): return (x.resource_type == resource_type) return filter(f, template.resources.values()) @classmethod @@ -1153,64 +1183,64 @@ def _get_os_data(self): OSTypeNotFoundError: Raised when the OS in the config file is not recognised """ - region=env.aws_region + region = env.aws_region os_default = 'ubuntu-1404' if region == 'eu-west-2': - available_types = { - 'ubuntu-1604': { - 'name': 'ubuntu-1604', - 'ami': 'ami-57eae033', - 'region': region, - 'distribution': 'ubuntu', - 'type': 'linux', - 'release': '20161214' - }, - 'ubuntu-1404': { - 'name': 'ubuntu-1404', - 'ami': 'ami-45eae021', - 'region': region, - 'distribution': 'ubuntu', - 'type': 'linux', - 'release': '20161213' - }, - 'windows2012': { - 'name': 'windows2012', - 'ami': 'ami-bb353fdf', - 'region': region, - 'distribution': 'windows', - 'type': 'windows', - 'release': '2016.11.23' - } - } + available_types = { + 'ubuntu-1604': { + 'name': 'ubuntu-1604', + 'ami': 'ami-57eae033', + 'region': region, + 'distribution': 'ubuntu', + 'type': 'linux', + 'release': '20161214' + }, + 'ubuntu-1404': { + 'name': 'ubuntu-1404', + 'ami': 'ami-45eae021', + 'region': region, + 'distribution': 'ubuntu', + 'type': 'linux', + 'release': '20161213' + }, + 'windows2012': { + 'name': 'windows2012', + 'ami': 'ami-bb353fdf', + 'region': region, + 'distribution': 'windows', + 'type': 'windows', + 'release': '2016.11.23' + } + } elif env.aws_region == 'eu-west-1': - available_types = { - 'ubuntu-1604': { - 'name': 'ubuntu-1604', - 'ami': 'ami-6f587e1c', - 'region': region, - 'distribution': 'ubuntu', - 'type': 'linux', - 'release': '20161214' - }, - 'ubuntu-1404': { - 'name': 'ubuntu-1404', - 'ami': 'ami-f95ef58a', - 'region': region, - 'distribution': 'ubuntu', - 'type': 
'linux', - 'release': '20160509.1' - }, - 'windows2012': { - 'name': 'windows2012', - 'ami': 'ami-8519a9f6', - 'region': region, - 'distribution': 'windows', - 'type': 'windows', - 'release': '2015.12.31' - } - } + available_types = { + 'ubuntu-1604': { + 'name': 'ubuntu-1604', + 'ami': 'ami-6f587e1c', + 'region': region, + 'distribution': 'ubuntu', + 'type': 'linux', + 'release': '20161214' + }, + 'ubuntu-1404': { + 'name': 'ubuntu-1404', + 'ami': 'ami-f95ef58a', + 'region': region, + 'distribution': 'ubuntu', + 'type': 'linux', + 'release': '20160509.1' + }, + 'windows2012': { + 'name': 'windows2012', + 'ami': 'ami-8519a9f6', + 'region': region, + 'distribution': 'windows', + 'type': 'windows', + 'release': '2015.12.31' + } + } else: - raise errors.CfnConfigError('Region {} is not supported'.format(region)) + raise errors.CfnConfigError('Region {} is not supported'.format(region)) os_choice = self.data['ec2'].get('os', os_default) if not available_types.get(os_choice, False): diff --git a/docs/SESLambdaForwarder.yaml b/docs/SESLambdaForwarder.yaml new file mode 100644 index 0000000..26cf4aa --- /dev/null +++ b/docs/SESLambdaForwarder.yaml @@ -0,0 +1,213 @@ +# flake8: noqa +AWSTemplateFormatVersion: '2010-09-09' +Description: S3 Backed Lambda Email Forwarder for SES +Parameters: + FromAddress: + Description: The verified SES email address to send from + Type: String + FilterAddresses: + Description: The destination addresses to filter + Type: CommaDelimitedList + ForwardingAddresses: + Description: The destination addresses to forward the email to + Type: CommaDelimitedList +Resources: + SESACMS3Bucket: + Type: AWS::S3::Bucket + Properties: + LifecycleConfiguration: + Rules: + - ExpirationInDays: 3 + Status: Enabled + SESS3BucketPolicy: + Type: "AWS::S3::BucketPolicy" + Properties: + Bucket: + Ref: "SESACMS3Bucket" + PolicyDocument: + Statement: + Effect: Allow + Principal: + Service: ses.amazonaws.com + Action: s3:PutObject + Resource: + - Fn::Join: + - '' + - - 
'arn:aws:s3:::' + - Ref: SESACMS3Bucket + - "/*" + Condition: + StringEquals: + aws:Referer: + Ref: AWS::AccountId + LambdaSESACMForwarderRole: + Type: AWS::IAM::Role + DependsOn: + - SESACMS3Bucket + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + Effect: Allow + Principal: + Service: lambda.amazonaws.com + Action: sts:AssumeRole + Path: "/" + Policies: + - PolicyName: ses-send-email + PolicyDocument: + Version: '2012-10-17' + Statement: + Effect: Allow + Action: + - ses:SendEmail + - ses:SendRawEmail + Resource: + - "*" + - PolicyName: lambda-cloudwatch-access + PolicyDocument: + Version: '2012-10-17' + Statement: + Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + Resource: + - "arn:aws:logs:*:*:*" + - PolicyName: lambda-s3-access + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: + - s3:GetObject + Resource: + - Fn::Join: + - '' + - - 'arn:aws:s3:::' + - Ref: SESACMS3Bucket + - "/*" + SESACMForwarderLambda: + Type: AWS::Lambda::Function + Properties: + Description: Function for forwarding mail from S3 buckets + Handler: index.handler + Timeout: 60 + MemorySize: 128 + Role: + Fn::GetAtt: + - LambdaSESACMForwarderRole + - Arn + Runtime: python2.7 + Environment: + Variables: + SESACMS3BucketName: + Ref: SESACMS3Bucket + FromAddress: + Ref: FromAddress + FilterAddresses: + !Join [ ",", !Ref "FilterAddresses" ] + ForwardingAddresses: + !Join [ ",", !Ref "ForwardingAddresses" ] + Code: + ZipFile: | + # coding: utf-8 + import boto3 + import email + import json + import logging + import os + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + + def handler(event, context): + logger.info("Collecting event record data...") + record = event["Records"][0] + try: + logger.info("Looking for SES event...") + bucket_name = os.environ['SESACMS3BucketName'] + message_id = record["ses"]["mail"]["messageId"] + message_source = record["ses"]["mail"]["source"] + 
message_destination = record["ses"]["mail"]["destination"] + except KeyError: + logger.critical("There was a problem retrieving data " + "from the event record, {}".format(record)) + return("FAIL") + + s3_client = boto3.client('s3') + logger.info("Fetching s3 object: {}/{}".format(bucket_name, message_id)) + mail_object = s3_client.get_object(Bucket = bucket_name, Key = message_id) + logger.info("Decoding mail body...") + email_data = mail_object["Body"].read().decode('utf-8') + + # Get env variables + # We need to use a verified email address rather than relying on the source + logger.info("Retrieving environment settings...") + email_from = os.environ['FromAddress'] + filter_addresses = os.environ['FilterAddresses'].split(",") + forwarding_addresses = os.environ['ForwardingAddresses'].split(",") + # Filter out addresses + filtered_addresses = [address for address in message_destination if address in filter_addresses] + + if len(filtered_addresses) == 0: + logger.debug("No filtering addresses found, skipping message...") + return ("CONTINUE") + logger.info("Found filtering addresses {}, " + "forwarding the message...".format(filtered_addresses)) + + email_object = email.message_from_string(email_data) + email_subject = email_object.get('Subject', 'Verification message for ACM') + logger.info("Parsing mail: {}".format(email_subject)) + email_text="" + + for part in email_object.walk(): + c_type = part.get_content_type() + c_disp = part.get('Content-Disposition') + if c_type == 'text/plain' and c_disp == None: + email_text = email_text + '\n' + part.get_payload() + else: + continue + logger.info("Connecting to SES client") + ses_client = boto3.client('ses') + response = ses_client.send_email( + Source=email_from, + Destination={ + 'ToAddresses': forwarding_addresses + }, + Message={ + 'Subject': { + 'Data': email_subject, + }, + 'Body': { + 'Text': { + 'Data': email_text, + } + } + }, + Tags=[ + { + 'Name': 'string', + 'Value': 'string' + }, + ], + ) + 
logger.info("Sent verification email successfully to {}".format(forwarding_addresses)) + + return "CONTINUE" + +Outputs: + SESACMS3BucketName: + Description: The bucket that stores SES ACM mail + Value: + Ref: SESACMS3Bucket + SESACMFromAddress: + Description: The address that sends SES mail + Value: + Ref: FromAddress + SESACMFilterAddresses: + Description: The addresses filtered to be forwarded + Value: !Join [ ",", !Ref "FilterAddresses" ] + SESACMToAddresses: + Description: The addresses that will receive SES mail + Value: !Join [ ",", !Ref "ForwardingAddresses" ] diff --git a/docs/images/acm_ses_sns.png b/docs/images/acm_ses_sns.png new file mode 100644 index 0000000..0df6e3a Binary files /dev/null and b/docs/images/acm_ses_sns.png differ diff --git a/docs/ses_acm_validation.md b/docs/ses_acm_validation.md new file mode 100644 index 0000000..7f6473b --- /dev/null +++ b/docs/ses_acm_validation.md @@ -0,0 +1,73 @@ +# flake8: noqa +Creating an Automated AWS Certificate Manager Verification Pipeline +------------------------------------------------------------------- + +AWS Certificate Manager (ACM) requires verification of the ownership of the domain name, +or alternatively, of the super-domain of the domain that you want the certificate for. +Currently the only way this is done is through email to the following addresses, + +* Whois domain, technical, and administrative contact +* administrator, hostmaster, postmaster, webmaster, and admin at the requested domain or the super-domain if specified. + +http://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate.html + +![alt text](images/acm_ses_sns.png "Certificate manager to simple email service pipeline") + +Using the super-domain as the validation domain +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +With a lot of hosted zones we could end up doing extra work to add MX records to every one +of them, luckily, we can use the super-domain instead of the actual domain to do verifications. 
+This means that instead of requiring setting up mail on every hosted zone, we can simply set all +the sub-domains to verify on the super-domain. For example, mysite.mysubdomain.mydomain.com +sets validation domain to mydomain.com, so the verification emails will not be sent to +admin@mysite.mysubdomain.mydomain.com, but instead to admin@mydomain.com. + +See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-certificatemanager-certificate.html + + Type: "AWS::CertificateManager::Certificate" + Properties: + DomainName: String + DomainValidationOptions: + - DomainValidationOptions + SubjectAlternativeNames: + - String + Tags: + - Resource Tag + +Domain validation with an external mail server +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To achieve this the most straightforward way would be to set up a mail MX record in route53 +for your domain or super-domain pointing at your existing mail service. + +Domain validation using SES +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We can set up Amazon Simple Email Service (SES) to work together with ACM. +First we setup a route 53 MX record on the super-domain hosted zone to use the Simple Email Service (SES). +Add an SES ruleset that sends mail to an S3 bucket and triggers a Lambda function that will +forward the email from the bucket. This way the verification email can be sent on to real administrators. + +Using the SESLambdaForwarder cloudformation stack +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There’s a cloudformation template available for doing this, +see `the SES lambda forwarder template `_ + +When creating this stack you can set the email addresses to forward, and a set of +forwarding addresses. This cloudformation sets up a lambda function and a S3 bucket +with default 3 day deletion policy. We need to setup SES on the domain we want to +receive mail on since it doesn't have cloudformation support, but luckily this is +straightforward. 
+ +> Note, SES may bounce emails when sending to meta-email addresses such as for groups. See `the SES FAQ `_ for more details. + +Setup SES on the mail domain +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Create an SES domain for the base domain name superdomain.com. +* Let the SES domain creation set route 53 entries for verification and MX, eg in the superdomain.com hosted zone, superdomain.com MX 10 inbound-smtp.eu-west-1.amazonaws.com. +* Create a SES rule set to send emails to the S3 bucket. +* When creating certificates, use superdomain.com as the validation domain, this means admin@superdomain.com will be emailed when we create any sub-domain something.somewhere.superdomain.com. +* While the stack requiring an ACM SSL certificate is being created, the forwarding addresses should all receive a validation email.