From 98ef7607a4d73d71a707eeaa7d15f4e077a46f27 Mon Sep 17 00:00:00 2001
From: Niall Creech
Date: Mon, 20 Jul 2015 13:09:51 +0100
Subject: [PATCH] Use automatic resource naming

Currently we use hard-coded names for resources like S3. This change
allows us to use dynamic names auto-generated by AWS and exposed as an
output. (Closes ministryofjustice/bootstrap-cfn#123)

- S3 bucket names are optional in config and default to dynamic ones
- Add a method and fabfile function for getting a filtered list of
  stack resources
- Update tests to look for the new StaticBucketName output and policy
  changes
- Add a test to check that a config with an s3 key but no subkeys loads ok
- PEP8 fixes
---
 CHANGELOG.md                    |  2 ++
 bootstrap_cfn/cloudformation.py | 29 ++++++++++++++---
 bootstrap_cfn/config.py         | 58 +++++++++++++++++++++++----------
 bootstrap_cfn/ec2.py            |  1 +
 bootstrap_cfn/elb.py            |  3 +-
 bootstrap_cfn/errors.py         |  2 +-
 bootstrap_cfn/utils.py          |  1 -
 tests/tests.py                  | 55 ++++++++++++++++++++++++++-----
 8 files changed, 116 insertions(+), 35 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6eef8dc..34de36a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,8 @@
 * Allow security groups in YAML to reference themselves (for example an
   "elasticsearch" SG that allows other instances of that sg (and itself)
   to access port 9300)
+* Use automatic resource naming to allow S3 bucket names to be auto-named
+  by AWS
 
 ## Version 0.5.3
 
diff --git a/bootstrap_cfn/cloudformation.py b/bootstrap_cfn/cloudformation.py
index ede6cf4..5cdae54 100644
--- a/bootstrap_cfn/cloudformation.py
+++ b/bootstrap_cfn/cloudformation.py
@@ -58,11 +58,30 @@ def get_stack_load_balancers(self, stack_name_or_id):
             load_balancers: Set of stack resources containing only
                 load balancers for this stack
         """
+        resource_type = 'AWS::ElasticLoadBalancing::LoadBalancer'
+        return self.get_resource_type(stack_name_or_id, resource_type)
+
+    def get_resource_type(self, stack_name_or_id, resource_type=None):
+        """
+        Collect a set of stack resources of a specific type
+
+        Args:
+            stack_name_or_id (string): Name or id used to identify the stack
+            resource_type (string): The resource type identifier
+
+        Returns:
+            resources: Set of stack resources containing only
+                the given resource type for this stack
+        """
         # get the stack
-        load_balancers = []
+        resources = []
         stack = self.conn_cfn.describe_stacks(stack_name_or_id)
         if stack:
-            fn = lambda x: x.resource_type == 'AWS::ElasticLoadBalancing::LoadBalancer'
-            # get the load balancers group
-            load_balancers = filter(fn, stack[0].list_resources())
-        return load_balancers
+            if resource_type:
+                fn = lambda x: x.resource_type == resource_type
+                # get only the resources of the requested type
+                resources = filter(fn, stack[0].list_resources())
+            else:
+                resources = stack[0].list_resources()
+
+        return resources
diff --git a/bootstrap_cfn/config.py b/bootstrap_cfn/config.py
index 31d045c..b6ee48e 100644
--- a/bootstrap_cfn/config.py
+++ b/bootstrap_cfn/config.py
@@ -90,6 +90,11 @@ def process(self):
         if 's3' in self.data:
             s3 = self.s3()
             map(template.add_resource, s3)
+            template.add_output(Output(
+                "StaticBucketName",
+                Description="S3 bucket name",
+                Value=Ref("StaticBucket")
+            ))
 
         template = json.loads(template.to_json())
         if 'includes' in self.data:
@@ -264,29 +269,46 @@ def iam(self):
 
     def s3(self):
         # REQUIRED FIELDS AND MAPPING
         required_fields = {
-            'static-bucket-name': 'BucketName'
         }
 
-        # TEST FOR REQUIRED FIELDS AND EXIT IF MISSING ANY
-        present_keys = self.data['s3'].keys()
-        for i in required_fields.keys():
-            if i not in present_keys:
-                print "\n\n[ERROR] Missing S3 fields [%s]" % i
-                sys.exit(1)
-
-        bucket = Bucket(
-            "StaticBucket",
-            AccessControl="BucketOwnerFullControl",
-            BucketName=self.data['s3']['static-bucket-name'],
-        )
+        # There are no required fields, so even when there are no
+        # subkeys we still need to accept a bare parent key 's3:' to
+        # signify that we want to create an s3 bucket. In that case we
+        # fall back to an empty (no options set) dictionary
+        if isinstance(self.data['s3'], dict):
+            present_keys = self.data['s3'].keys()
+            # TEST FOR REQUIRED FIELDS AND EXIT IF MISSING ANY
+            for i in required_fields.keys():
+                if i not in present_keys:
+                    print "\n\n[ERROR] Missing S3 fields [%s]" % i
+                    sys.exit(1)
+        else:
+            present_keys = {}
+
+        # If the static bucket name is manually set then use that,
+        # otherwise fall back to the default name auto-generated
+        # by AWS
+        if 'static-bucket-name' in present_keys:
+            bucket = Bucket(
+                "StaticBucket",
+                AccessControl="BucketOwnerFullControl",
+                BucketName=self.data['s3']['static-bucket-name'],
+            )
+        else:
+            bucket = Bucket(
+                "StaticBucket",
+                AccessControl="BucketOwnerFullControl",
+            )
 
+        # If a policy has been manually set then use it, otherwise set
+        # a reasonable default of public 'Get' access
         if 'policy' in present_keys:
             policy = json.loads(open(self.data['s3']['policy']).read())
         else:
-            arn = 'arn:aws:s3:::%s/*' % self.data['s3']['static-bucket-name']
+            arn = Join("", ["arn:aws:s3:::", Ref(bucket), "/*"])
             policy = {
                 'Action': ['s3:GetObject'],
-                'Resource': arn,
+                "Resource": [arn],
                 'Effect': 'Allow',
                 'Principal': '*'}
@@ -465,7 +487,7 @@ def elb(self):
 
             elb_role_policies = PolicyType(
                 "Policy" + safe_name,
-                PolicyName=safe_name+"BaseHost",
+                PolicyName=safe_name + "BaseHost",
                 PolicyDocument={"Statement": [{
                     "Action": [
                         "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
@@ -478,7 +500,7 @@
                             ":",
                             Ref("AWS::AccountId"),
                             ':loadbalancer/%s' % load_balancer.LoadBalancerName
-                            ])
+                        ])
                     ],
                     "Effect": "Allow"}
                 ]},
@@ -507,7 +529,7 @@
                     "FromPort": 443,
                     "ToPort": 443,
                     "CidrIp": "0.0.0.0/0"
-                    },
+                },
                 {
                     "IpProtocol": "tcp",
                     "FromPort": 80,
diff --git a/bootstrap_cfn/ec2.py b/bootstrap_cfn/ec2.py
index 4c0cb3e..566589e 100644
--- a/bootstrap_cfn/ec2.py
+++ b/bootstrap_cfn/ec2.py
@@ -3,6 +3,7 @@
 from bootstrap_cfn import cloudformation
 from bootstrap_cfn import utils
 
+
 class EC2:
 
     conn_cfn = None
diff --git a/bootstrap_cfn/elb.py b/bootstrap_cfn/elb.py
index 611cd78..28fd861 100644
--- a/bootstrap_cfn/elb.py
+++ b/bootstrap_cfn/elb.py
@@ -67,7 +67,7 @@ def set_ssl_certificates(self, ssl_config, stack_name):
                 if protocol == "HTTPS":
                     logging.info("ELB::set_ssl_certificates: "
                                  "Found HTTPS protocol on '%s', "
-                                 "updating SSL certificate with '%s'"
+                                 "updating SSL certificate with '%s'"
                                  % (load_balancer.name, cert_arn))
                     self.conn_elb.set_lb_listener_SSL_certificate(load_balancer.name,
                                                                   out_port,
@@ -83,4 +83,3 @@
                     "No load balancers found in stack,")
 
         return updated_load_balancers
-
diff --git a/bootstrap_cfn/errors.py b/bootstrap_cfn/errors.py
index 474bcd2..0f8cc37 100644
--- a/bootstrap_cfn/errors.py
+++ b/bootstrap_cfn/errors.py
@@ -4,7 +4,7 @@
 class BootstrapCfnError(Exception):
     def __init__(self, msg):
         super(BootstrapCfnError, self).__init__(msg)
-        print >> sys.stderr, "[ERROR] {0}: {1}".format(self.__class__.__name__, msg)
+        print >> sys.stderr, "[ERROR] {0}: {1}".format(self.__class__.__name__, msg)
 
 
 class CfnConfigError(BootstrapCfnError):
diff --git a/bootstrap_cfn/utils.py b/bootstrap_cfn/utils.py
index 1c458fb..fb0506e 100644
--- a/bootstrap_cfn/utils.py
+++ b/bootstrap_cfn/utils.py
@@ -78,7 +78,6 @@ def tail(stack, stack_name):
     from fabric.colors import green, red, yellow
 
     """Show and then tail the event log"""
-
     def colorize(e):
         if e.endswith("_IN_PROGRESS"):
             return yellow(e)
diff --git a/tests/tests.py b/tests/tests.py
index 5ea43dd..f83327a 100755
--- a/tests/tests.py
+++ b/tests/tests.py
@@ -119,12 +119,14 @@ def test_s3(self):
         bucket_ref = Ref(bucket)
 
         static_bp = s3.BucketPolicy('StaticBucketPolicy')
+        resource_value = [Join("", ["arn:aws:s3:::", {"Ref": "StaticBucket"}, "/*"])]
+
         static_bp.PolicyDocument = {
             'Statement': [
                 {
                     'Action': [
                         's3:GetObject'],
-                    'Resource': 'arn:aws:s3:::moj-test-dev-static/*',
+                    'Resource': resource_value,
                     'Effect': 'Allow',
                     'Principal': '*'
                 }
@@ -140,7 +142,40 @@ def test_s3(self):
         compare(self._resources_to_dict([static_bp, bucket]),
                 self._resources_to_dict(config.s3()))
 
+    def test_s3_no_subkeys(self):
+        """
+        Test that a config with the s3: key alone will load
+        """
+        bucket = s3.Bucket('StaticBucket')
+        bucket.AccessControl = 'BucketOwnerFullControl'
+        bucket_ref = Ref(bucket)
+
+        static_bp = s3.BucketPolicy('StaticBucketPolicy')
+        resource_value = [Join("", ["arn:aws:s3:::", {"Ref": "StaticBucket"}, "/*"])]
+
+        static_bp.PolicyDocument = {
+            'Statement': [
+                {
+                    'Action': [
+                        's3:GetObject'],
+                    'Resource': resource_value,
+                    'Effect': 'Allow',
+                    'Principal': '*'
+                }
+            ]
+        }
+        static_bp.Bucket = bucket_ref
+
+        # Load project config and wipe out all s3 subkeys
+        base_config = ProjectConfig('tests/sample-project.yaml', 'dev')
+        base_config.config["s3"] = None
+        config = ConfigParser(base_config.config,
+                              'my-stack-name')
+        compare(self._resources_to_dict([static_bp, bucket]),
+                self._resources_to_dict(config.s3()))
+
     def test_custom_s3_policy(self):
+        resource_value = 'arn:aws:s3:::moj-test-dev-static/*'
         expected_s3 = [
             {
                 'Action': [
@@ -148,16 +183,16 @@
                     's3:Put*',
                     's3:List*',
                     's3:Delete*'],
-                'Resource': 'arn:aws:s3:::moj-test-dev-static/*',
-                'Effect': 'Allow',
-                'Principal': {'AWS': '*'}
+                'Resource': resource_value,
+                'Effect': 'Allow',
+                'Principal': {'AWS': '*'}
             }
         ]
 
         project_config = ProjectConfig('tests/sample-project.yaml', 'dev')
         project_config.config['s3'] = {
-            'static-bucket-name': 'moj-test-dev-static',
+            'StaticBucketName': 'moj-test-dev-static',
             'policy': 'tests/sample-custom-s3-policy.json'}
 
         config = ConfigParser(project_config.config, 'my-stack-name')
@@ -467,6 +502,10 @@ def test_cf_includes(self):
             "someoutput": {
                 "Description": "For tests",
                 "Value": "BLAHBLAH"
+            },
+            "StaticBucketName": {
+                "Description": "S3 bucket name",
+                "Value": {"Ref": "StaticBucket"}
             }
         }
         config = ConfigParser(project_config.config, 'my-stack-name')
@@ -498,14 +537,14 @@ def test_process(self):
            "RolePolicies", "ScalingGroup", "StaticBucket", "StaticBucketPolicy",
            "SubnetA", "SubnetB", "SubnetC", "SubnetRouteTableAssociationA",
            "SubnetRouteTableAssociationB",
-           "SubnetRouteTableAssociationC", "VPC"
+           "SubnetRouteTableAssociationC", "VPC",
         ]
 
         resource_names = cfn_template['Resources'].keys()
         resource_names.sort()
         compare(resource_names, wanted)
 
-        wanted = ["dbhost", "dbport"]
+        wanted = ["StaticBucketName", "dbhost", "dbport"]
         output_names = cfn_template['Outputs'].keys()
         output_names.sort()
         compare(output_names, wanted)
@@ -561,7 +600,7 @@ def test_process_with_vpc_config(self):
         resource_names.sort()
         compare(resource_names, wanted)
 
-        wanted = ["dbhost", "dbport"]
+        wanted = ["StaticBucketName", "dbhost", "dbport"]
         output_names = cfn_template['Outputs'].keys()
         output_names.sort()
         compare(output_names, wanted)
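
Usage sketch (not part of the patch): with the bucket name no longer fixed in
config, callers can recover the AWS-generated name either from the new
StaticBucketName stack output or by filtering stack resources with the
get_resource_type() method added above. The helper below is only an
illustration; the Cloudformation class name and its constructor argument are
assumptions, while get_resource_type() comes from this patch and
physical_resource_id is the standard boto stack-resource attribute that holds
the AWS-assigned name.

    # Hypothetical fabfile-style helper, Python 2 to match the codebase.
    from bootstrap_cfn import cloudformation

    def get_static_bucket_names(aws_config, stack_name):
        # The Cloudformation(...) constructor is assumed, not defined in
        # this patch; only get_resource_type() is added here.
        cfn = cloudformation.Cloudformation(aws_config)
        # Keep only the S3 bucket resources for this stack
        buckets = cfn.get_resource_type(stack_name, 'AWS::S3::Bucket')
        # physical_resource_id is the name AWS generated for each bucket
        return [b.physical_resource_id for b in buckets]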