This repository has been archived by the owner on Jan 19, 2022. It is now read-only.

Use automatic resource naming
Currently we use hard-coded names for resources such as S3 buckets. This change
allows us to use dynamic names auto-generated by AWS and exposed as a stack
output. (Closes #123)
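
For illustration, a minimal sketch of reading the auto-generated bucket name
back from the new stack output with boto 2 (the region and stack name below
are assumptions, not part of this commit):

    import boto.cloudformation

    # Assumed region and stack name for this sketch
    conn = boto.cloudformation.connect_to_region('eu-west-1')
    stack = conn.describe_stacks('my-stack-name')[0]

    # The commit adds a 'StaticBucketName' output whose value is the
    # AWS-generated bucket name (a Ref on the StaticBucket resource)
    for output in stack.outputs:
        if output.key == 'StaticBucketName':
            print output.value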

- S3 bucket names are optional in config and default to dynamic ones
- Add a method and fab_file function for getting a filtered list of
  resources (see the sketch after this list)
- Update tests to look for the new BucketName output and policy changes
- Add a test to check that a config with an s3 key but no subkeys loads OK
- PEP8 fixes
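
As a rough sketch of the resource-filtering idea, here is the same filtering
done directly with boto 2, since construction of the module's helper class is
not shown in this diff; the region, stack name, and printed attributes are
assumptions:

    import boto.cloudformation

    # Assumed region and stack name for this sketch
    conn = boto.cloudformation.connect_to_region('eu-west-1')
    stacks = conn.describe_stacks('my-stack-name')

    # Same filtering idea as the new get_resource_type method: keep only
    # resources of a given CloudFormation type
    resource_type = 'AWS::S3::Bucket'
    resources = []
    if stacks:
        resources = filter(lambda r: r.resource_type == resource_type,
                           stacks[0].list_resources())

    for resource in resources:
        # physical_resource_id holds the AWS-generated bucket name
        print resource.logical_resource_id, resource.physical_resource_id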
Niall Creech committed Jul 21, 2015
1 parent 330cac6 commit 98ef760
Showing 8 changed files with 116 additions and 35 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -5,6 +5,8 @@
* Allow security groups in YAML to reference themselves (for example an
"elasticsearch" SG that allows other instances of that sg (and itself) to
access port 9300)
* Use automatic resource naming to allow S3 bucket names to be auto-named
  by AWS

## Version 0.5.3

29 changes: 24 additions & 5 deletions bootstrap_cfn/cloudformation.py
@@ -58,11 +58,30 @@ def get_stack_load_balancers(self, stack_name_or_id):
load_balancers: Set of stack resources containing only
load balancers for this stack
"""
resource_type = 'AWS::ElasticLoadBalancing::LoadBalancer'
return self.get_resource_type(stack_name_or_id, resource_type)

def get_resource_type(self, stack_name_or_id, resource_type=None):
"""
Collect up a set of specific stack resources
Args:
stack_name_or_id (string): Name or id used to identify the stack
resource_type(string): The resource type identifier
Returns:
resources: Set of stack resources containing only
the resource type for this stack
"""
# get the stack
load_balancers = []
resources = []
stack = self.conn_cfn.describe_stacks(stack_name_or_id)
if stack:
fn = lambda x: x.resource_type == 'AWS::ElasticLoadBalancing::LoadBalancer'
# get the load balancers group
load_balancers = filter(fn, stack[0].list_resources())
return load_balancers
if resource_type:
fn = lambda x: x.resource_type == resource_type
# get the resources
resources = filter(fn, stack[0].list_resources())
else:
resources = stack[0].list_resources()

return resources
58 changes: 40 additions & 18 deletions bootstrap_cfn/config.py
@@ -90,6 +90,11 @@ def process(self):
if 's3' in self.data:
s3 = self.s3()
map(template.add_resource, s3)
template.add_output(Output(
"StaticBucketName",
Description="S3 bucket name",
Value=Ref("StaticBucket")
))

template = json.loads(template.to_json())
if 'includes' in self.data:
@@ -264,29 +269,46 @@ def iam(self):
def s3(self):
# REQUIRED FIELDS AND MAPPING
required_fields = {
'static-bucket-name': 'BucketName'
}

# TEST FOR REQUIRED FIELDS AND EXIT IF MISSING ANY
present_keys = self.data['s3'].keys()
for i in required_fields.keys():
if i not in present_keys:
print "\n\n[ERROR] Missing S3 fields [%s]" % i
sys.exit(1)

bucket = Bucket(
"StaticBucket",
AccessControl="BucketOwnerFullControl",
BucketName=self.data['s3']['static-bucket-name'],
)
# As there are no required fields, although we may not have any
# subkeys we still need to be able to have a parent key 's3:' to
# signify that we want to create an s3 bucket. In this case we
# set up an empty (no options set) dictionary
if isinstance(self.data['s3'], dict):
present_keys = self.data['s3'].keys()
# TEST FOR REQUIRED FIELDS AND EXIT IF MISSING ANY
for i in required_fields.keys():
if i not in present_keys:
print "\n\n[ERROR] Missing S3 fields [%s]" % i
sys.exit(1)
else:
present_keys = {}

# If the static bucket name is manually set then use that,
# otherwise use the <stackname>-<logical-resource-name>-<random>
# default
if 'static-bucket-name' in present_keys:
bucket = Bucket(
"StaticBucket",
AccessControl="BucketOwnerFullControl",
BucketName=self.data['s3']['static-bucket-name'],
)
else:
bucket = Bucket(
"StaticBucket",
AccessControl="BucketOwnerFullControl",
)

# If a policy has been manually set then use it, otherwise set
# a reasonable default of public 'Get' access
if 'policy' in present_keys:
policy = json.loads(open(self.data['s3']['policy']).read())
else:
arn = 'arn:aws:s3:::%s/*' % self.data['s3']['static-bucket-name']
arn = Join("", ["arn:aws:s3:::", Ref(bucket), "/*"])
policy = {
'Action': ['s3:GetObject'],
'Resource': arn,
"Resource": [arn],
'Effect': 'Allow',
'Principal': '*'}

@@ -465,7 +487,7 @@ def elb(self):

elb_role_policies = PolicyType(
"Policy" + safe_name,
PolicyName=safe_name+"BaseHost",
PolicyName=safe_name + "BaseHost",
PolicyDocument={"Statement": [{
"Action": [
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
@@ -478,7 +500,7 @@
":",
Ref("AWS::AccountId"),
':loadbalancer/%s' % load_balancer.LoadBalancerName
])
])
],
"Effect": "Allow"}
]},
@@ -507,7 +529,7 @@
"FromPort": 443,
"ToPort": 443,
"CidrIp": "0.0.0.0/0"
},
},
{
"IpProtocol": "tcp",
"FromPort": 80,
1 change: 1 addition & 0 deletions bootstrap_cfn/ec2.py
@@ -3,6 +3,7 @@
from bootstrap_cfn import cloudformation
from bootstrap_cfn import utils


class EC2:

conn_cfn = None
3 changes: 1 addition & 2 deletions bootstrap_cfn/elb.py
@@ -67,7 +67,7 @@ def set_ssl_certificates(self, ssl_config, stack_name):
if protocol == "HTTPS":
logging.info("ELB::set_ssl_certificates: "
"Found HTTPS protocol on '%s', "
"updating SSL certificate with '%s'"
"updating SSL certificate with '%s'"
% (load_balancer.name, cert_arn))
self.conn_elb.set_lb_listener_SSL_certificate(load_balancer.name,
out_port,
@@ -83,4 +83,3 @@ def set_ssl_certificates(self, ssl_config, stack_name):
"No load balancers found in stack,")

return updated_load_balancers

2 changes: 1 addition & 1 deletion bootstrap_cfn/errors.py
@@ -4,7 +4,7 @@
class BootstrapCfnError(Exception):
def __init__(self, msg):
super(BootstrapCfnError, self).__init__(msg)
print >> sys.stderr, "[ERROR] {0}: {1}".format(self.__class__.__name__, msg)
print >> sys.stderr, "[ERROR] {0}: {1}".format(self.__class__.__name__, msg)


class CfnConfigError(BootstrapCfnError):
1 change: 0 additions & 1 deletion bootstrap_cfn/utils.py
@@ -78,7 +78,6 @@ def tail(stack, stack_name):
from fabric.colors import green, red, yellow
"""Show and then tail the event log"""


def colorize(e):
if e.endswith("_IN_PROGRESS"):
return yellow(e)
55 changes: 47 additions & 8 deletions tests/tests.py
@@ -119,12 +119,14 @@ def test_s3(self):
bucket_ref = Ref(bucket)

static_bp = s3.BucketPolicy('StaticBucketPolicy')
resource_value = [Join("", ["arn:aws:s3:::", {"Ref": "StaticBucket"}, "/*"])]

static_bp.PolicyDocument = {
'Statement': [
{
'Action': [
's3:GetObject'],
'Resource': 'arn:aws:s3:::moj-test-dev-static/*',
'Resource': resource_value,
'Effect': 'Allow',
'Principal': '*'
}
@@ -140,24 +142,57 @@ def test_s3(self):
compare(self._resources_to_dict([static_bp, bucket]),
self._resources_to_dict(config.s3()))

def test_s3_no_subkeys(self):
"""
Test that a config with the s3: key alone will load
"""
bucket = s3.Bucket('StaticBucket')
bucket.AccessControl = 'BucketOwnerFullControl'
bucket_ref = Ref(bucket)

static_bp = s3.BucketPolicy('StaticBucketPolicy')
resource_value = [Join("", ["arn:aws:s3:::", {"Ref": "StaticBucket"}, "/*"])]

static_bp.PolicyDocument = {
'Statement': [
{
'Action': [
's3:GetObject'],
'Resource': resource_value,
'Effect': 'Allow',
'Principal': '*'
}
]
}
static_bp.Bucket = bucket_ref

# Load project config and wipe out all s3 subkeys
base_config = ProjectConfig('tests/sample-project.yaml', 'dev')
base_config.config["s3"] = None
config = ConfigParser(base_config.config,
'my-stack-name')
compare(self._resources_to_dict([static_bp, bucket]),
self._resources_to_dict(config.s3()))

def test_custom_s3_policy(self):
resource_value = 'arn:aws:s3:::moj-test-dev-static/*'
expected_s3 = [
{
'Action': [
's3:Get*',
's3:Put*',
's3:List*',
's3:Delete*'],
'Resource': 'arn:aws:s3:::moj-test-dev-static/*',
'Effect': 'Allow',
'Principal': {'AWS': '*'}
'Resource': resource_value,
'Effect': 'Allow',
'Principal': {'AWS': '*'}
}
]

project_config = ProjectConfig('tests/sample-project.yaml', 'dev')

project_config.config['s3'] = {
'static-bucket-name': 'moj-test-dev-static',
'StaticBucketName': 'moj-test-dev-static',
'policy': 'tests/sample-custom-s3-policy.json'}

config = ConfigParser(project_config.config, 'my-stack-name')
@@ -467,6 +502,10 @@ def test_cf_includes(self):
"someoutput": {
"Description": "For tests",
"Value": "BLAHBLAH"
},
"StaticBucketName": {
"Description": "S3 bucket name",
"Value": {"Ref": "StaticBucket"}
}
}
config = ConfigParser(project_config.config, 'my-stack-name')
@@ -498,14 +537,14 @@ def test_process(self):
"RolePolicies", "ScalingGroup", "StaticBucket",
"StaticBucketPolicy", "SubnetA", "SubnetB", "SubnetC",
"SubnetRouteTableAssociationA", "SubnetRouteTableAssociationB",
"SubnetRouteTableAssociationC", "VPC"
"SubnetRouteTableAssociationC", "VPC",
]

resource_names = cfn_template['Resources'].keys()
resource_names.sort()
compare(resource_names, wanted)

wanted = ["dbhost", "dbport"]
wanted = ["StaticBucketName", "dbhost", "dbport"]
output_names = cfn_template['Outputs'].keys()
output_names.sort()
compare(output_names, wanted)
@@ -561,7 +600,7 @@ def test_process_with_vpc_config(self):
resource_names.sort()
compare(resource_names, wanted)

wanted = ["dbhost", "dbport"]
wanted = ["StaticBucketName", "dbhost", "dbport"]
output_names = cfn_template['Outputs'].keys()
output_names.sort()
compare(output_names, wanted)