From 3bdcaf69cd5453499e56246fa10ae9198923dc4a Mon Sep 17 00:00:00 2001
From: yufangzhang
Date: Fri, 24 Jun 2016 15:44:46 +0100
Subject: [PATCH 1/8] Add stack_id to tag stack

Use the stack_id to tag the stack; add the stack_id to the DNS record
name so that each stack's records can be identified.
---
 bootstrap_cfn/config.py    |  4 ++--
 bootstrap_cfn/fab_tasks.py | 13 ++++++++-----
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/bootstrap_cfn/config.py b/bootstrap_cfn/config.py
index 6e02f2a..07bb389 100644
--- a/bootstrap_cfn/config.py
+++ b/bootstrap_cfn/config.py
@@ -110,7 +110,7 @@ class ConfigParser(object):
     def __init__(self, data, stack_name, environment=None, application=None):
         self.stack_name = stack_name
         self.data = data
-
+        self.stack_id = self.stack_name.split('-')[-1]
         # Some things possibly used in user data templates
         self.environment = environment
         self.application = application
@@ -813,7 +813,7 @@ def elb(self, template):
             RecordSets=[
                 RecordSet(
                     "TitleIsIgnoredForThisResource",
-                    Name="%s.%s" % (elb['name'], elb['hosted_zone']),
+                    Name="%s-%s.%s" % (elb['name'], self.stack_id, elb['hosted_zone']),
                     Type="A",
                     AliasTarget=AliasTarget(
                         GetAtt(load_balancer, "CanonicalHostedZoneNameID"),
diff --git a/bootstrap_cfn/fab_tasks.py b/bootstrap_cfn/fab_tasks.py
index c0d764b..c36d2ed 100755
--- a/bootstrap_cfn/fab_tasks.py
+++ b/bootstrap_cfn/fab_tasks.py
@@ -393,18 +393,22 @@ def set_stack_name():
     dns records to retreive it in the future.

     """
+    stack_suffix = uuid.uuid4().__str__()[-8:]
     if hasattr(env, 'tag'):
-        stack_tag = env.tag
+        # should check if the tag is used already
+        if env.tag == 'active':
+            raise CfnConfigError("tag cannot be 'active'")
+        else:
+            stack_tag = env.tag
     else:
-        stack_tag = 'active'
+        stack_tag = stack_suffix
     env.tag = stack_tag
     legacy_name = "{0}-{1}".format(env.application, env.environment)
     # get_config needs a stack_name so this is a hack because we don't
     # know it yet...
     env.stack_name = 'temp'
-    cfn_config = get_config()
     r53_conn = get_connection(R53)
-    zone_name = cfn_config.data.get('master_zone', None)
+    zone_name = get_basic_config().get('master_zone', None)
     if not zone_name:
         raise CfnConfigError("No master_zone in yaml, unable to create/find DNS records for stack name")
     logger.info("fab_tasks::set_stack_name: Found master zone '{}' in config...".format(zone_name))
@@ -416,7 +420,6 @@ def set_stack_name():
         "for zone name '{}'...".format(zone_id, zone_name))
     record_name = "stack.{0}.{1}".format(stack_tag, legacy_name)

-    stack_suffix = uuid.uuid4().__str__()[-8:]
     record = "{0}.{1}".format(record_name, zone_name)
     logger.info("fab_tasks::set_stack_name: "
                 "Creating stack suffix {} "
+ env.stack_name = 'temp' + r53_conn = get_connection(R53) ++ zone_name = get_basic_config().get('master_zone', None) + if not zone_name: + raise CfnConfigError("No master_zone in yaml, unable to create/find DNS records for stack name") + logger.info("fab_tasks::set_stack_name: Found master zone '{}' in config...".format(zone_name)) +*************** +*** 416,422 **** + "for zone name '{}'...".format(zone_id, zone_name)) + record_name = "stack.{0}.{1}".format(stack_tag, legacy_name) + +- stack_suffix = uuid.uuid4().__str__()[-8:] + record = "{0}.{1}".format(record_name, zone_name) + logger.info("fab_tasks::set_stack_name: " + "Creating stack suffix {} " +--- 420,425 ---- + "for zone name '{}'...".format(zone_id, zone_name)) + record_name = "stack.{0}.{1}".format(stack_tag, legacy_name) + + record = "{0}.{1}".format(record_name, zone_name) + logger.info("fab_tasks::set_stack_name: " + "Creating stack suffix {} " From 8396b3dc9f5b97670fdba84c67fe4579a2e6db9e Mon Sep 17 00:00:00 2001 From: yufangzhang Date: Mon, 27 Jun 2016 11:19:48 +0100 Subject: [PATCH 2/8] Add set_active_stack fab task Change cfn_create and cfn_delete to manage multiple stacks. Create set_active_stack fab task to switch between stacks tagged differently --- bootstrap_cfn/errors.py | 25 ++++ bootstrap_cfn/fab_tasks.py | 250 +++++++++++++++++++++++++++++++------ bootstrap_cfn/r53.py | 35 +++++- 3 files changed, 268 insertions(+), 42 deletions(-) diff --git a/bootstrap_cfn/errors.py b/bootstrap_cfn/errors.py index ed89e0b..ce76aa7 100644 --- a/bootstrap_cfn/errors.py +++ b/bootstrap_cfn/errors.py @@ -77,3 +77,28 @@ def __init__(self, autoscaling_group, expected_instance_count, instances): "Could not find {} instances in autoscaling group {}. Actual state is {} instances, {}" .format(expected_instance_count, autoscaling_group, len(instances), instances) ) + +class TagRecordExistConflictError(BootstrapCfnError): + def __init__(self, stack_tag): + msg = ("An {} record already exists. ".format(stack_tag)) + super(TagRecordExistConflictError, self).__init__(msg) + +class ActiveTagExistConflictError(BootstrapCfnError): + def __init__(self, stack_id): + msg = ("An active record already exists in. ".format(stack_id)) + super(ActiveTagExistConflictError, self).__init__(msg) + +class TagRecordNotFoundError(BootstrapCfnError): + def __init__(self, tag): + msg = ("Could not find a dns record for tag '{}'. ".format(tag)) + super(TagRecordNotFoundError, self).__init__(msg) + +class PublicELBNotFoundError(BootstrapCfnError): + def __init__(self): + msg = ("Could not find a internet facing ELB. ") + super(PublicELBNotFoundError, self).__init__(msg) + +class StackRecordNotFoundError(BootstrapCfnError): + def __init__(self, stack_record_name): + msg = ("Could not find a dns record for stack '{}'. 
".format(stack_record_name)) + super(StackRecordNotFoundError, self).__init__(msg) diff --git a/bootstrap_cfn/fab_tasks.py b/bootstrap_cfn/fab_tasks.py index c36d2ed..b4278ca 100755 --- a/bootstrap_cfn/fab_tasks.py +++ b/bootstrap_cfn/fab_tasks.py @@ -18,7 +18,10 @@ from bootstrap_cfn.cloudformation import Cloudformation from bootstrap_cfn.config import ConfigParser, ProjectConfig from bootstrap_cfn.elb import ELB -from bootstrap_cfn.errors import BootstrapCfnError, CfnConfigError, CloudResourceNotFoundError, DNSRecordNotFoundError, ZoneIDNotFoundError +from bootstrap_cfn.errors import PublicELBNotFoundError, TagRecordNotFoundError,\ + TagRecordExistConflictError, BootstrapCfnError, CfnConfigError,\ + CloudResourceNotFoundError, DNSRecordNotFoundError, ZoneIDNotFoundError,\ + StackRecordNotFoundError, ActiveTagExistConflictError from bootstrap_cfn.iam import IAM from bootstrap_cfn.r53 import R53 from bootstrap_cfn.utils import tail @@ -355,25 +358,26 @@ def get_stack_name(new=False): stack_tag = 'active' env.tag = stack_tag if not hasattr(env, 'stack_name'): - legacy_name = "{0}-{1}".format(env.application, env.environment) # get_config needs a stack_name so this is a hack because we don't # know it yet... env.stack_name = 'temp' - zone_name = get_basic_config().get('master_zone', None) + zone_name = get_zone_name() if not zone_name: raise CfnConfigError("No master_zone in yaml, unable to create/find DNS records for stack name") logger.info("fab_tasks::get_stack_name: Found master zone '{}' in config...".format(zone_name)) - - record_name = "stack.{0}.{1}".format(stack_tag, legacy_name) + # get record name in the format of: stack.[stack_tag].[app]-[env] + record_name = get_tag_record_name(stack_tag) dns_name = "{}.{}".format(record_name, zone_name) try: + # get stack id stack_suffix = dns.resolver.query(dns_name, 'TXT')[0].to_text().replace('"', "") logger.info("fab_tasks::get_stack_name: Found stack suffix '{}' " "for dns record '{}'... ".format(stack_suffix, dns_name)) + legacy_name = get_legacy_name() env.stack_name = "{0}-{1}".format(legacy_name, stack_suffix) logger.info("fab_tasks::get_stack_name: Found stack name '{}'...".format(env.stack_name)) except dns.resolver.NXDOMAIN: - raise DNSRecordNotFoundError(zone_name) + raise DNSRecordNotFoundError(dns_name) return env.stack_name @@ -393,42 +397,69 @@ def set_stack_name(): dns records to retreive it in the future. """ + # create a stack id stack_suffix = uuid.uuid4().__str__()[-8:] if hasattr(env, 'tag'): - # should check if the tag is used already if env.tag == 'active': - raise CfnConfigError("tag cannot be 'active'") + raise ActiveTagExistConflictError(stack_suffix) + elif hastag(env.tag): + raise TagRecordExistConflictError(env.tag) else: stack_tag = env.tag else: stack_tag = stack_suffix env.tag = stack_tag - legacy_name = "{0}-{1}".format(env.application, env.environment) - # get_config needs a stack_name so this is a hack because we don't - # know it yet... 
- env.stack_name = 'temp' + zone_id = get_zone_id() + record = "{}.{}".format(get_tag_record_name(stack_tag), get_zone_name()) + logger.info("fab_tasks::set_stack_name: " + "Creating stack suffix {} " + "for record '{}' " + "in zone id '{}'...".format(stack_suffix, record, zone_id)) + # Let DNS update DNSServerError propogate r53_conn = get_connection(R53) + r53_conn.update_dns_record(zone_id, record, 'TXT', '"{0}"'.format(stack_suffix)) + env.stack_name = "{0}-{1}".format(get_legacy_name(), stack_suffix) + return env.stack_name + +def hastag(stack_tag): + """ + Check if stack_tag is in use + + """ + r53_conn = get_connection(R53) + zone_id = get_zone_id() + record = "{}".format(get_tag_record_name(stack_tag)) + hasrecord = r53_conn.get_record(get_zone_name(), zone_id, record, 'TXT') + return hasrecord + +def get_zone_name(): zone_name = get_basic_config().get('master_zone', None) if not zone_name: raise CfnConfigError("No master_zone in yaml, unable to create/find DNS records for stack name") - logger.info("fab_tasks::set_stack_name: Found master zone '{}' in config...".format(zone_name)) + logger.info("fab_tasks::get_zone_id: Found master zone '{}' in config...".format(zone_name)) + return zone_name +def get_zone_id(): + zone_name = get_zone_name() + r53_conn = get_connection(R53) zone_id = r53_conn.get_hosted_zone_id(zone_name) if not zone_id: raise ZoneIDNotFoundError(zone_name) - logger.info("fab_tasks::set_stack_name: Found zone id '{}' " + logger.info("fab_tasks::get_zone_id: Found zone id '{}' " "for zone name '{}'...".format(zone_id, zone_name)) - record_name = "stack.{0}.{1}".format(stack_tag, legacy_name) + return zone_id - record = "{0}.{1}".format(record_name, zone_name) - logger.info("fab_tasks::set_stack_name: " - "Creating stack suffix {} " - "for record '{}' " - "in zone id '{}'...".format(stack_suffix, record, zone_id)) - # Let DNS update DNSServerError propogate - r53_conn.update_dns_record(zone_id, record, 'TXT', '"{0}"'.format(stack_suffix)) - env.stack_name = "{0}-{1}".format(legacy_name, stack_suffix) - return env.stack_name +def get_legacy_name(): + legacy_name = "{0}-{1}".format(env.application, env.environment) + return legacy_name + +def get_tag_record_name(stack_tag): + """ + Returns record name in the format of: stack.[tag].[app]-[env] + """ + legacy_name = get_legacy_name() + record_name = "stack.{0}.{1}".format(stack_tag, legacy_name) + return record_name def _validate_fabric_env(): @@ -474,7 +505,9 @@ def get_connection(klass): @task def cfn_delete(force=False, pre_delete_callbacks=None): """ - Delete the AWS Cloudformation stack + Delete the AWS Cloudformation stack for inactive stacks + + Delete DNS records for active stacks Deletes the stack and the associated SSL certificates @@ -486,35 +519,90 @@ def cfn_delete(force=False, pre_delete_callbacks=None): kwargs of ``stack_name``, and ``config``. (Python only, not setable from command line) """ - stack_name = get_stack_name() - if not force: - x = raw_input("Are you really sure you want to blow away the whole stack for {}!? 
(y/n)\n".format(stack_name)) - if x not in ['y', 'Y', 'Yes', 'yes']: - sys.exit(1) cfn_config = get_config() cfn = get_connection(Cloudformation) + stack_name = get_stack_name() if pre_delete_callbacks is not None: for callback in pre_delete_callbacks: callback(stack_name=stack_name, config=cfn_config) print green("\nSTACK {0} DELETING...\n").format(stack_name) + r53_conn = get_connection(R53) + if not force: + x = raw_input("Are you really sure you want to blow away the whole stack for {}!? (y/n)\n".format(stack_name)) + if x not in ['y', 'Y', 'Yes', 'yes']: + sys.exit(1) - cfn.delete(stack_name) + elb = get_one_public_elbs() + stack_id = stack_name.split('-')[-1] - if not env.blocking: - print 'Running in non blocking mode. Exiting.' - sys.exit(0) + if hasattr(env, 'tag') and not env.tag =='active': + stack_tag = env.tag + logger.info("Deleting {} stack {}...".format(stack_tag, stack_name)) + # delete helloworld_12345.dsd.io [Alias record] - # Wait for stacks to delete - print 'Waiting for stack to delete.' + stack_record_name = "{}-{}".format(elb, stack_id) + try: + stack_record_value = r53_conn.get_record(get_zone_name(), get_zone_id(), stack_record_name, 'A') + except StackRecordNotFoundError as e: + raise e(stack_record_name) + if stack_record_value: + record_name = "{}.{}".format(stack_record_name, get_zone_name()) + #stack_record_value = dns.resolver.query(stack_record_name, 'A')[0].to_text().replace('"', "") + r53_conn.delete_dns_record(get_zone_id(), record_name, 'A', stack_record_value, is_alias=True) + record_name = "{}.{}".format(get_tag_record_name(stack_tag), get_zone_name()) + try: + record_value = dns.resolver.query(record_name, 'TXT')[0].to_text().replace('"', "") + except dns.resolver.NXDOMAIN: + raise DNSRecordNotFoundError(record_name) + if record_value: + r53_conn.delete_dns_record(get_zone_id(), record_name, 'TXT', '"{}"'.format(record_value)) + + #delete vpc dependencies before cfn + cfn.delete(stack_name) + # Wait for stacks to delete + print 'Waiting for stack to delete.' 
+        tail(cfn, stack_name)
+        if cfn.stack_missing(stack_name):
+            print green("Stack successfully deleted")
+        else:
+            print red("Stack deletion was unsuccessful")
+    else:
+        # delete dns records instead of stacks
+        stack_tag = 'active'
+        # stack.active.helloworld-dev(.dsd.io)
+        tag_record_name = get_tag_record_name(stack_tag)
+        try:
+            record_value = r53_conn.get_record(get_zone_name(), get_zone_id(), tag_record_name, 'TXT')
+        except TagRecordNotFoundError as e:
+            raise e(tag_record_name)
-    tail(cfn, stack_name)
+        record_name = '{}.{}'.format(tag_record_name, get_zone_name())
+        record = '"{}"'.format(record_value)
-    if cfn.stack_missing(stack_name):
-        print green("Stack successfully deleted")
-    else:
-        print red("Stack deletion was unsuccessfull")
+        # delete [helloworld.dsd.io] record
+        main_record_name = "{}.{}".format(elb, get_zone_name())
+        stack_record_name = "{}-{}".format(elb, stack_id)
+        try:
+            stack_record_value = r53_conn.get_record(get_zone_name(), get_zone_id(), stack_record_name, 'A')
+        except TagRecordNotFoundError as e:
+            raise e(stack_record_name)
+        try:
+            main_record_value = r53_conn.get_record(get_zone_name(), get_zone_id(), elb, 'A')
+        except TagRecordNotFoundError as e:
+            raise e(elb)
+        if stack_record_value and stack_record_value.to_print() == main_record_value.to_print():
+            r53_conn.delete_dns_record(get_zone_id(), main_record_name, 'A', main_record_value, is_alias=True)
+
+        if stack_id and stack_id == record.replace('"',""):
+            # delete [stack.active.helloworld-dev.dsd.io] record
+            r53_conn.delete_dns_record(get_zone_id(), record_name, 'TXT', record)
+        # delete the cfn stack last, because the helloworld-12345.dsd.io alias record value is needed above
+
+    if not env.blocking:
+        print 'Running in non blocking mode. Exiting.'
+        sys.exit(0)

     if 'ssl' in cfn_config.data:
         iam = get_connection(IAM)
@@ -704,3 +792,83 @@ def cycle_instances(delay=None):
     else:
         termination_delay = None
     asg.cycle_instances(termination_delay=termination_delay)
+
+@task
+def set_active_stack(tag, force=False):
+    """
+    Switch the active stack
+
+    Update the 'active' DNS records to point at the stack with the
+    specified tag.
+    """
+    # stack.active.helloworld-dev(.dsd.io)
+    active_record = get_tag_record_name('active')
+    r53_conn = get_connection(R53)
+    zone_name = get_zone_name()
+    zone_id = get_zone_id()
+    active_stack_id = r53_conn.get_record(zone_name, zone_id, active_record, 'TXT')
+    if has_active_stack() and not force:
+        x = raw_input("The current active stack is {}. Do you want to switch? 
(y/n)\n".format(active_stack_id)) + if x not in ['y', 'Y', 'Yes', 'yes']: + sys.exit(1) + # helloworld.active.dsd.io + tag_record = get_tag_record_name(tag) + if not tag_record: + raise TagRecordNotFoundError(tag) + else: + tag_stack_id = r53_conn.get_record(zone_name, zone_id, tag_record, 'TXT') + + # update TXT record + # Point [helloworld.dsd.io]'s stack_id to [helloworld-tag.dsd.io]'s + r53_conn.update_dns_record(zone_id, "{}.{}".format(active_record, get_zone_name()),'TXT', '"{}"'.format(tag_stack_id)) + logger.info("fab_tasks::set_active_stack: Successfully updated dns alias record") + + + # get all public facing elbs + elb = get_one_public_elbs() + # helloworld.dsd.io + main_record_name = "{}.{}".format(elb, zone_name) + # helloworld-12345.dsd.io + stack_record_name = "{}-{}.{}".format(elb, tag_stack_id, zone_name) + # get the ELB value in stack_record_name's record + record_value = r53_conn.get_record(zone_name, zone_id, "{}-{}".format(elb, tag_stack_id), 'A') + if record_value: + # point [helloworld.dsd.io] to [helloworld-12345.dsd.io]'s ELB + r53_conn.update_dns_record(zone_id, main_record_name,'A', record_value, is_alias=True) + logger.info("fab_tasks::set_active_stack: Successfully updated dns alias record") + logger.info("Active stack is changed to {}".format(tag_record)) + return True + else: + raise StackRecordNotFoundError(stack_record_name) + +def has_active_stack(): + active_record = get_tag_record_name('active') + r53_conn = get_connection(R53) + zone_name = get_zone_name() + zone_id = get_zone_id() + active_stack_id = r53_conn.get_record(zone_name, zone_id, active_record, 'TXT') + if not active_stack_id: + return False + elb = get_one_public_elbs() + dns_record_name = '{}-{}'.format(elb, active_stack_id) + dns_record_value = r53_conn.get_record(zone_name, zone_id, dns_record_name, 'A') + main_record_value = r53_conn.get_record(zone_name, zone_id, elb, 'A') + if dns_record_value == main_record_value: + return True + +def get_all_elbs(): + cfn_config = get_config() + elbs = [ x.get('name') for x in cfn_config.data.get('elb', {}) if x.get('scheme') == 'internet-facing'] + return elbs + +def get_one_public_elbs(): + # elbs: 0? 1? N? + elbs = get_all_elbs() + if len(elbs) < 1: + raise PublicELBNotFoundError + elif len(elbs) == 1: + logger.info("fab_tasks::set_active_stack: Found one ELB '{}', " + "using it for public ELB... 
".format(elbs[0])) + else: + logger.info("fab_tasks::set_active_stack: Found multiple ELBs," + "using the first one '{}' as public ELB".format(elbs[0])) + return elbs[0] \ No newline at end of file diff --git a/bootstrap_cfn/r53.py b/bootstrap_cfn/r53.py index f928892..856ebf5 100644 --- a/bootstrap_cfn/r53.py +++ b/bootstrap_cfn/r53.py @@ -59,7 +59,10 @@ def update_dns_record(self, zone, record, record_type, record_value, is_alias=Fa if is_alias: # provide list of params as needed by function set_alias # http://boto.readthedocs.org/en/latest/ref/route53.html#boto.route53.record.Record.set_alias - change.set_alias(*record_value) + alias_hosted_zone_id = record_value.alias_hosted_zone_id + alias_dns_name = record_value.alias_dns_name + alias_evaluate_target_health = record_value.alias_evaluate_target_health + change.set_alias(alias_hosted_zone_id, alias_dns_name, alias_evaluate_target_health) else: change.add_value(record_value) if dry_run: @@ -68,6 +71,34 @@ def update_dns_record(self, zone, record, record_type, record_value, is_alias=Fa changes.commit() return True + def delete_dns_record(self, zone, record, record_type, record_value, is_alias=False, dry_run=False): + ''' + Delete a dns record in route53 + + zone -- a string specifying the zone id + record -- a string for the record to update + record_type -- a string to specify the record, eg "A" + + + Returns True if update successful or raises an exception if not + ''' + changes = boto.route53.record.ResourceRecordSets(self.conn_r53, zone) + change = changes.add_change("DELETE", record, record_type, ttl=60) + if is_alias: + # provide list of params as needed by function set_alias + # http://boto.readthedocs.org/en/latest/ref/route53.html#boto.route53.record.Record.set_alias + alias_hosted_zone_id = record_value.alias_hosted_zone_id + alias_dns_name = record_value.alias_dns_name + alias_evaluate_target_health = record_value.alias_evaluate_target_health + change.set_alias(alias_hosted_zone_id, alias_dns_name, alias_evaluate_target_health) + else: + change.add_value(record_value) + if dry_run: + print(changes) + else: + res = changes.commit() + return res + def get_record(self, zone, zone_id, record, record_type): ''' ''' @@ -77,5 +108,7 @@ def get_record(self, zone, zone_id, record, record_type): if rr.type == record_type and rr.name == fqdn: if rr.type == 'TXT': rr.resource_records[0] = rr.resource_records[0][1:-1] + if rr.type == 'A': + return rr return rr.resource_records[0] return None From 32b8bbf52933bc4156e84da10d00a13c97242f31 Mon Sep 17 00:00:00 2001 From: yufangzhang Date: Wed, 29 Jun 2016 13:58:01 +0100 Subject: [PATCH 3/8] Add a task get_active_stack & README --- README.rst | 36 +- bootstrap_cfn/config.py.orig | 1152 ------------------------------- bootstrap_cfn/config.py.rej | 17 - bootstrap_cfn/errors.py | 15 +- bootstrap_cfn/fab_tasks.py | 265 ++++--- bootstrap_cfn/fab_tasks.py.orig | 704 ------------------- bootstrap_cfn/fab_tasks.py.rej | 59 -- bootstrap_cfn/r53.py | 112 +-- tests/test_r53.py | 8 +- tests/tests.py | 10 +- 10 files changed, 276 insertions(+), 2102 deletions(-) delete mode 100644 bootstrap_cfn/config.py.orig delete mode 100644 bootstrap_cfn/config.py.rej delete mode 100755 bootstrap_cfn/fab_tasks.py.orig delete mode 100644 bootstrap_cfn/fab_tasks.py.rej diff --git a/README.rst b/README.rst index ba21468..63bbb08 100644 --- a/README.rst +++ b/README.rst @@ -57,20 +57,52 @@ If your ``$CWD`` is anywhere else, you need to pass in a path to particular fabr Multiple Stacks =============== +Each environment can 
have multiple stacks.
+The stack tagged ``active`` is the one the environment's DNS entry point
+resolves to.
+
 If you want to run multiple stacks with the same name and environment place
 the following in the yaml configuration::

     master_zone: my-zone.dsd.io

+cfn_create
+++++++++++
+
 Then when you create a stack you can specify a tag before cfn_create, like::

     fab application:courtfinder aws:my_project_prod environment:dev config:/path/to/courtfinder-dev.yaml tag:active cfn_create

-NB active is the default.
+
+set_active_stack(tag_name)
+++++++++++++++++++++++++++
+
+NB ``active`` is the default. You can also make another stack active using the fab task ``set_active_stack(tag_name)``::
+
+    fab application:courtfinder aws:my_project_prod environment:dev config:/path/to/courtfinder-dev.yaml set_active_stack:[tag_name]
+
+cfn_delete
+++++++++++
+
+You can also delete any stack you no longer need by specifying its tag::
+
+    fab application:courtfinder aws:my_project_prod environment:dev config:/path/to/courtfinder-dev.yaml tag:[tag_name] cfn_delete
+
+NB ``tag_name`` can be any existing tag; ``active`` is the default.
+When deleting the active stack, only the active DNS records are removed. Otherwise the whole stack is removed.
+
+
+swap_tags
++++++++++

 Then you can refer to this stack by its tag in the future. In this way it is
 easier to bring up two stacks from the same config. If you want to swap the
 names of the stacks you can do the following::

     fab application:courtfinder aws:my_project_prod environment:dev config:/path/to/courtfinder-dev.yaml swap_tags:inactive,active
+
+
+others
+++++++
+
+There are also helper fab tasks: ``get_active_stack`` returns the active stack for this application and environment, and ``get_stack_list`` lists all stacks related to it.
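+
+How the DNS records fit together
+++++++++++++++++++++++++++++++++
+
+For orientation, a sketch of the Route53 records these tasks maintain. The
+names below are illustrative; the exact values depend on your ELB names,
+your ``master_zone`` and the generated 8-character stack suffix::
+
+    stack.active.courtfinder-dev.my-zone.dsd.io   TXT   "1a2b3c4d"     (maps a tag to a stack suffix)
+    courtfinder-1a2b3c4d.my-zone.dsd.io           A     alias to ELB   (per-stack entry point)
+    courtfinder.my-zone.dsd.io                    A     alias to ELB   (entry point of the active stack)
+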
Example Configuration ===================== diff --git a/bootstrap_cfn/config.py.orig b/bootstrap_cfn/config.py.orig deleted file mode 100644 index 6e5e022..0000000 --- a/bootstrap_cfn/config.py.orig +++ /dev/null @@ -1,1152 +0,0 @@ -import json -import logging -import os -import sys -import textwrap -import uuid - -from troposphere import Base64, FindInMap, GetAZs, GetAtt, Join, Output, Ref, Tags, Template -from troposphere.autoscaling import AutoScalingGroup, BlockDeviceMapping, \ - EBSBlockDevice, LaunchConfiguration, Tag -from troposphere.ec2 import InternetGateway, Route, RouteTable, SecurityGroup, \ - SecurityGroupIngress, Subnet, SubnetRouteTableAssociation, VPC, \ - VPCGatewayAttachment -from troposphere.elasticache import ReplicationGroup, SubnetGroup - -from troposphere.elasticloadbalancing import ConnectionDrainingPolicy, \ - HealthCheck, LoadBalancer, Policy -from troposphere.iam import InstanceProfile, PolicyType, Role -from troposphere.rds import DBInstance, DBSubnetGroup -from troposphere.route53 import AliasTarget, RecordSet, RecordSetGroup -from troposphere.s3 import Bucket, BucketPolicy - -import yaml - -from bootstrap_cfn import errors, mime_packer, utils - - -class ProjectConfig: - - config = None - - def __init__(self, config, environment, passwords=None): - try: - self.config = self.load_yaml(config)[environment] - except KeyError: - raise errors.BootstrapCfnError("Environment " + environment + " not found") - - if passwords: - passwords_dict = self.load_yaml(passwords)[environment] - self.config = utils.dict_merge(self.config, passwords_dict) - - @staticmethod - def load_yaml(fp): - if os.path.exists(fp): - return yaml.load(open(fp).read()) - - -class ConfigParser(object): - - config = {} - - def __init__(self, data, stack_name, environment=None, application=None): - self.stack_name = stack_name - self.stack_id = uuid.uuid4().__str__()[-8:] - self.data = data - - # Some things possibly used in user data templates - self.environment = environment - self.application = application - - def process(self): - template = self.base_template() - - vpc = self.vpc() - map(template.add_resource, vpc) - - iam = self.iam() - map(template.add_resource, iam) - - ec2 = self.ec2() - map(template.add_resource, ec2) - - if 'elb' in self.data: - self.elb(template) - - if 'rds' in self.data: - self.rds(template) - - if 'elasticache' in self.data: - self.elasticache(template) - - if 's3' in self.data: - self.s3(template) - - template = json.loads(template.to_json()) - if 'includes' in self.data: - for inc_path in self.data['includes']: - inc = json.load(open(inc_path)) - template = utils.dict_merge(template, inc) - return json.dumps( - template, sort_keys=True, indent=4, separators=(',', ': ')) - - def base_template(self): - from bootstrap_cfn import vpc - t = Template() - - # Get the OS specific data - os_data = self._get_os_data() - t.add_mapping("AWSRegion2AMI", { - os_data.get('region'): {"AMI": os_data.get('ami')}, - }) - - if 'vpc' in self.data: - logging.info('bootstrap-cfn::base_template: Using configuration VPC address settings') - vpc_data = self.data.get('vpc', {}) - vpc_cidr = vpc_data.get('CIDR', '10.0.0.0/16') - subneta_cidr = vpc_data.get('SubnetA', '10.0.0.0/20') - subnetb_cidr = vpc_data.get('SubnetB', '10.0.16.0/20') - subnetc_cidr = vpc_data.get('SubnetC', '10.0.32.0/20') - t.add_mapping("SubnetConfig", { - "VPC": { - "CIDR": vpc_cidr, - "SubnetA": subneta_cidr, - "SubnetB": subnetb_cidr, - "SubnetC": subnetc_cidr - } - }) - else: - default_vpc_cidr_prefix = 24 - 
default_vpc_subnet_prefix = 28 - default_vpc_subnet_count = 3 - - # Try to get random CIDR - available_cidr_block, subnet_cidr_blocks = ( - vpc.get_available_cidr_block( - default_vpc_cidr_prefix, - subnet_prefix=default_vpc_subnet_prefix) - ) - if available_cidr_block and len(subnet_cidr_blocks) > (default_vpc_subnet_count - 1): - logging.info('bootstrap-cfn::base_template: Using dynamic VPC address settings') - vpc_cidr = available_cidr_block - subneta_cidr = subnet_cidr_blocks[0] - subnetb_cidr = subnet_cidr_blocks[1] - subnetc_cidr = subnet_cidr_blocks[2] - else: - # Fallback to default - logging.info('bootstrap-cfn::base_template: Using static fallback VPC address settings') - vpc_cidr = "10.0.0.0/24" - subneta_cidr = "10.0.0.0/20" - subnetb_cidr = "10.0.16.0/20" - subnetc_cidr = "10.0.32.0/20" - - t.add_mapping("SubnetConfig", { - "VPC": { - "CIDR": vpc_cidr, - "SubnetA": subneta_cidr, - "SubnetB": subnetb_cidr, - "SubnetC": subnetc_cidr - } - }) - - return t - - def vpc(self): - - vpc = VPC( - "VPC", - InstanceTenancy="default", - EnableDnsSupport="true", - CidrBlock=FindInMap("SubnetConfig", "VPC", "CIDR"), - EnableDnsHostnames="true", - ) - - subnet_a = Subnet( - "SubnetA", - VpcId=Ref(vpc), - AvailabilityZone="eu-west-1a", - CidrBlock=FindInMap("SubnetConfig", "VPC", "SubnetA"), - Tags=Tags( - Application=Ref("AWS::StackId"), - Network="Public", - ), - ) - - subnet_b = Subnet( - "SubnetB", - VpcId=Ref(vpc), - AvailabilityZone="eu-west-1b", - CidrBlock=FindInMap("SubnetConfig", "VPC", "SubnetB"), - Tags=Tags( - Application=Ref("AWS::StackId"), - Network="Public", - ), - ) - - subnet_c = Subnet( - "SubnetC", - VpcId=Ref(vpc), - AvailabilityZone="eu-west-1c", - CidrBlock=FindInMap("SubnetConfig", "VPC", "SubnetC"), - Tags=Tags( - Application=Ref("AWS::StackId"), - Network="Public", - ), - ) - - igw = InternetGateway( - "InternetGateway", - Tags=Tags( - Application=Ref("AWS::StackId"), - Network="Public", - ), - ) - - gw_attachment = VPCGatewayAttachment( - "AttachGateway", - VpcId=Ref(vpc), - InternetGatewayId=Ref(igw), - ) - - route_table = RouteTable( - "PublicRouteTable", - VpcId=Ref(vpc), - Tags=Tags( - Application=Ref("AWS::StackId"), - Network="Public", - ), - ) - - public_route = Route( - "PublicRoute", - GatewayId=Ref(igw), - DestinationCidrBlock="0.0.0.0/0", - RouteTableId=Ref(route_table), - DependsOn=gw_attachment.title - ) - - subnet_a_route_assoc = SubnetRouteTableAssociation( - "SubnetRouteTableAssociationA", - SubnetId=Ref(subnet_a), - RouteTableId=Ref(route_table), - ) - - subnet_b_route_assoc = SubnetRouteTableAssociation( - "SubnetRouteTableAssociationB", - SubnetId=Ref(subnet_b), - RouteTableId=Ref(route_table), - ) - - subnet_c_route_assoc = SubnetRouteTableAssociation( - "SubnetRouteTableAssociationC", - SubnetId=Ref(subnet_c), - RouteTableId=Ref(route_table), - ) - - resources = [vpc, subnet_a, subnet_b, subnet_c, igw, gw_attachment, - public_route, route_table, subnet_a_route_assoc, - subnet_b_route_assoc, subnet_c_route_assoc] - - # Hack until we return troposphere objects directly - # return json.loads(json.dumps(dict((r.title, r) for r in resources), cls=awsencode)) - return resources - - def iam(self): - role = Role( - "BaseHostRole", - Path="/", - AssumeRolePolicyDocument={ - "Statement": [{ - "Action": ["sts:AssumeRole"], - "Effect": "Allow", - "Principal": {"Service": ["ec2.amazonaws.com"]} - }] - }, - ) - - role_policies = PolicyType( - "RolePolicies", - PolicyName="BaseHost", - PolicyDocument={"Statement": [ - {"Action": ["autoscaling:Describe*"], 
"Resource": "*", "Effect": "Allow"}, - {"Action": ["ec2:Describe*"], "Resource": "*", "Effect": "Allow"}, - {"Action": ["ec2:CreateTags"], "Resource": "*", "Effect": "Allow"}, - {"Action": ["rds:Describe*"], "Resource": "*", "Effect": "Allow"}, - {"Action": ["elasticloadbalancing:Describe*"], "Resource": "*", "Effect": "Allow"}, - {"Action": ["elasticache:Describe*"], "Resource": "*", "Effect": "Allow"}, - {"Action": ["cloudformation:Describe*"], "Resource": "*", "Effect": "Allow"}, - {"Action": ["s3:List*"], "Resource": "*", "Effect": "Allow"} - ]}, - Roles=[Ref(role)], - ) - instance_profile = InstanceProfile( - "InstanceProfile", - Path="/", - Roles=[Ref(role)], - ) - - resources = [role, role_policies, instance_profile] - # Hack until we return troposphere objects directly - # return json.loads(json.dumps(dict((r.title, r) for r in resources), cls=awsencode)) - return resources - - def s3(self, template): - """ - Create an s3 resource configuration from the config file data. - This will produce Bucket and BucketPolicy resources along with - the bucket name as output, these are all added to the troposphere - template. - - Args: - template: - The troposphere.Template object - """ - # As there are no required fields, although we may not have any - # subkeys we still need to be able to have a parent key 's3:' to - # signify that we want to create an s3 bucket. In this case we - # set up an empty (no options set) dictionary - present_keys = {} - if isinstance(self.data['s3'], dict): - present_keys = self.data['s3'].keys() - - # Enable specifying multiple buckets - if 'buckets' in present_keys: - bucket_list = self.data['s3'].get('buckets') - for bucket_config in bucket_list: - self.create_s3_bucket(bucket_config, template) - - # If the static bucket name is manually set then use that, - # otherwise use the -- - # default - bucket = Bucket( - "StaticBucket", - AccessControl="BucketOwnerFullControl", - ) - if 'static-bucket-name' in present_keys: - bucket.BucketName = self.data['s3']['static-bucket-name'] - - # If a policy has been manually set then use it, otherwise set - # a reasonable default of public 'Get' access - if 'policy' in present_keys: - policy = json.loads(open(self.data['s3']['policy']).read()) - else: - arn = Join("", ["arn:aws:s3:::", Ref(bucket), "/*"]) - policy = { - 'Action': ['s3:GetObject'], - "Resource": arn, - 'Effect': 'Allow', - 'Principal': '*'} - - bucket_policy = BucketPolicy( - "StaticBucketPolicy", - Bucket=Ref(bucket), - PolicyDocument={"Statement": [policy]}, - ) - # Add the bucket name to the list of cloudformation - # outputs - template.add_output(Output( - "StaticBucketName", - Description="S3 bucket name", - Value=Ref(bucket) - )) - - # Add the resources to the troposphere template - map(template.add_resource, [bucket, bucket_policy]) - - def create_s3_bucket(self, bucket_config, template): - """ - Create an s3 bucket configuration from config data. - This will produce Bucket and BucketPolicy resources along with - the bucket name as output, these are all added to the troposphere - template. 
- - Args: - bucket_config(dictionary): Keyed bucket config settings - template: - The troposphere.Template object - """ - bucket_name = bucket_config.get('name') - bucket = Bucket( - bucket_name, - AccessControl="BucketOwnerFullControl", - ) - - # If a policy has been manually set then use it, otherwise set - # a reasonable default of public 'Get' access - if 'policy' in bucket_config: - policy = json.loads(open(bucket_config['policy']).read()) - else: - arn = Join("", ["arn:aws:s3:::", Ref(bucket), "/*"]) - policy = { - 'Action': ['s3:DeleteObject', 's3:GetObject', 's3:PutObject'], - "Resource": arn, - 'Effect': 'Allow', - 'Principal': '*', - "Condition": { - "StringEquals": { - "aws:sourceVpc": {"Ref": "VPC"} - } - } - } - bucket_policy = BucketPolicy( - "{}Policy".format(bucket_name), - Bucket=Ref(bucket), - PolicyDocument={"Statement": [policy]}, - ) - # Add the bucket name to the list of cloudformation - # outputs - template.add_output(Output( - "{}Policy".format(bucket_name), - Description="S3 bucket name", - Value=Ref(bucket) - )) - - map(template.add_resource, [bucket, bucket_policy]) - - def ssl(self): - return self.data['ssl'] - - def rds(self, template): - """ - Create an RDS resource configuration from the config file data - and add it to the troposphere.Template. Outputs for the RDS name, - host and port are created. - - Args: - template: - The troposphere.Template object - """ - # REQUIRED FIELDS MAPPING - required_fields = { - 'db-name': 'DBName', - 'storage': 'AllocatedStorage', - 'storage-type': 'StorageType', - 'backup-retention-period': 'BackupRetentionPeriod', - 'db-master-username': 'MasterUsername', - 'db-master-password': 'MasterUserPassword', - 'db-engine': 'Engine', - 'db-engine-version': 'EngineVersion', - 'instance-class': 'DBInstanceClass', - 'multi-az': 'MultiAZ' - } - - optional_fields = { - 'storage-encrypted': 'StorageEncrypted', - 'identifier': 'DBInstanceIdentifier' - } - - # LOAD STACK TEMPLATE - resources = [] - rds_subnet_group = DBSubnetGroup( - "RDSSubnetGroup", - SubnetIds=[Ref("SubnetA"), Ref("SubnetB"), Ref("SubnetC")], - DBSubnetGroupDescription="VPC Subnets" - ) - resources.append(rds_subnet_group) - - database_sg = SecurityGroup( - "DatabaseSG", - SecurityGroupIngress=[ - {"ToPort": 5432, - "FromPort": 5432, - "IpProtocol": "tcp", - "CidrIp": FindInMap("SubnetConfig", "VPC", "CIDR")}, - {"ToPort": 1433, - "FromPort": 1433, - "IpProtocol": "tcp", - "CidrIp": FindInMap("SubnetConfig", "VPC", "CIDR")}, - {"ToPort": 3306, - "FromPort": 3306, - "IpProtocol": "tcp", - "CidrIp": FindInMap("SubnetConfig", "VPC", "CIDR")} - ], - VpcId=Ref("VPC"), - GroupDescription="SG for EC2 Access to RDS", - ) - resources.append(database_sg) - - rds_instance = DBInstance( - "RDSInstance", - PubliclyAccessible=False, - AllowMajorVersionUpgrade=False, - AutoMinorVersionUpgrade=False, - VPCSecurityGroups=[GetAtt(database_sg, "GroupId")], - DBSubnetGroupName=Ref(rds_subnet_group), - StorageEncrypted=False, - DependsOn=database_sg.title - ) - resources.append(rds_instance) - - # We *cant* specify db-name for SQL Server based RDS instances. 
:( - if 'db-engine' in self.data['rds'] and self.data['rds']['db-engine'].startswith("sqlserver"): - required_fields.pop('db-name') - - # TEST FOR REQUIRED FIELDS AND EXIT IF MISSING ANY - for yaml_key, rds_prop in required_fields.iteritems(): - if yaml_key not in self.data['rds']: - print "\n\n[ERROR] Missing RDS fields [%s]" % yaml_key - sys.exit(1) - else: - rds_instance.__setattr__(rds_prop, self.data['rds'][yaml_key]) - - for yaml_key, rds_prop in optional_fields.iteritems(): - if yaml_key in self.data['rds']: - rds_instance.__setattr__(rds_prop, self.data['rds'][yaml_key]) - - # Add resources and outputs - map(template.add_resource, resources) - template.add_output(Output( - "dbhost", - Description="RDS Hostname", - Value=GetAtt(rds_instance, "Endpoint.Address") - )) - template.add_output(Output( - "dbport", - Description="RDS Port", - Value=GetAtt(rds_instance, "Endpoint.Port") - )) - - def elasticache(self, template): - """ - Create an elasticache resource configuration from the config file data - and add it to the troposphere.Template. Outputs for the elasticache name, - host and port are created. - - Args: - template: - The troposphere.Template object - """ - # REQUIRED FIELDS MAPPING - required_fields = { - } - - optional_fields = { - 'clusters': 'NumCacheClusters', - 'node_type': 'CacheNodeType', - 'port': 'Port', - } - - # Setup params and config - component_config = self.data['elasticache'] - # Setup defaults - if 'clusters' not in component_config: - component_config['clusters'] = 3 - if 'node_type' not in component_config: - component_config['node_type'] = 'cache.m1.small' - if 'port' not in component_config: - component_config['port'] = 6379 - - engine = 'redis' - - # Generate snapshot arns - seeds = component_config.get('seeds', None) - snapshot_arns = [] - if seeds: - # Get s3 seeds - s3_seeds = seeds.get('s3', []) - for seed in s3_seeds: - snapshot_arns.append("arn:aws:s3:::%s" % (seed)) - - # LOAD STACK TEMPLATE - resources = [] - - es_sg = SecurityGroup( - "ElasticacheSG", - SecurityGroupIngress=[ - {"ToPort": component_config['port'], - "FromPort": component_config['port'], - "IpProtocol": "tcp", - "CidrIp": FindInMap("SubnetConfig", "VPC", "CIDR")} - ], - VpcId=Ref("VPC"), - GroupDescription="SG for EC2 Access to Elasticache", - ) - resources.append(es_sg) - - es_subnet_group = SubnetGroup( - 'ElasticacheSubnetGroup', - Description="Elasticache Subnet Group", - SubnetIds=[Ref("SubnetA"), Ref("SubnetB"), Ref("SubnetC")] - ) - resources.append(es_subnet_group) - - elasticache_replication_group = ReplicationGroup( - "ElasticacheReplicationGroup", - ReplicationGroupDescription='Elasticache Replication Group', - Engine=engine, - NumCacheClusters=component_config['clusters'], - CacheNodeType=component_config['node_type'], - SecurityGroupIds=[GetAtt(es_sg, "GroupId")], - CacheSubnetGroupName=Ref(es_subnet_group), - SnapshotArns=snapshot_arns - ) - resources.append(elasticache_replication_group) - - # TEST FOR REQUIRED FIELDS AND EXIT IF MISSING ANY - for yaml_key, prop in required_fields.iteritems(): - if yaml_key not in component_config: - print "\n\n[ERROR] Missing Elasticache fields [%s]" % yaml_key - sys.exit(1) - else: - elasticache_replication_group.__setattr__(prop, component_config[yaml_key]) - - for yaml_key, prop in optional_fields.iteritems(): - if yaml_key in component_config: - elasticache_replication_group.__setattr__(prop, component_config[yaml_key]) - - # Add resources and outputs - map(template.add_resource, resources) - - template.add_output(Output( - 
"ElasticacheReplicationGroupName", - Description="Elasticache Replication Group Name", - Value=Ref(elasticache_replication_group) - )) - template.add_output(Output( - "ElasticacheEngine", - Description="Elasticache Engine", - Value=engine - )) - - def elb(self, template): - """ - Create an ELB resource configuration from the config file data - and add them to the troposphere template. Outputs for each ELB's - DNSName are created. - - Args: - template: - The cloudformation template file - """ - # REQUIRED FIELDS AND MAPPING - # Note, 'name' field is used internally to help label - # logical ids, and as part of the DNS record name. - required_fields = { - 'listeners': 'Listeners', - 'scheme': 'Scheme', - 'name': None, - 'hosted_zone': 'HostedZoneName' - } - - elb_list = [] - elb_sgs = [] - # COULD HAVE MULTIPLE ELB'S (PUBLIC / PRIVATE etc) - for elb in self.data['elb']: - safe_name = elb['name'].replace('-', '').replace('.', '').replace('_', '') - # TEST FOR REQUIRED FIELDS AND EXIT IF MISSING ANY - for i in required_fields.keys(): - if i not in elb.keys(): - print "\n\n[ERROR] Missing ELB fields [%s]" % i - sys.exit(1) - - # Collect together all policies - elb_policies = [ - Policy( - Attributes=[{'Name': "Reference-Security-Policy", 'Value': "ELBSecurityPolicy-2015-05"}], - PolicyType='SSLNegotiationPolicyType', - PolicyName='PinDownSSLNegotiationPolicy201505' - )] - for custom_policy_config in elb.get('policies', []): - custom_policy_name = custom_policy_config.get('name', False) - custom_policy_type = custom_policy_config.get('type', False) - - if not custom_policy_name: - logging.critical("config::elb: Load balancer policy must have a name defined") - sys.exit(1) - if not custom_policy_type: - logging.critical("config::elb: Load balancer policy {} must have a type defined".format(custom_policy_name)) - sys.exit(1) - - custom_policy_attributes = [] - for custom_policy_attribute_config in custom_policy_config.get('attributes', []): - for custom_policy_attribute_key, custom_policy_attribute_val in custom_policy_attribute_config.iteritems(): - custom_policy_attributes_entry = { - 'Name': custom_policy_attribute_key, - 'Value': custom_policy_attribute_val - } - custom_policy_attributes.append(custom_policy_attributes_entry) - - custom_policy = Policy( - Attributes=custom_policy_attributes, - PolicyType=custom_policy_type, - PolicyName=custom_policy_name, - ) - # Dont set these unless theyre in the config, other CFN will break - if custom_policy_config.get('instance_ports', False): - custom_policy.InstancePorts = custom_policy_config.get('instance_ports') - if custom_policy_config.get('load_balancer_ports', False): - custom_policy.LoadBalancerPorts = custom_policy_config.get('load_balancer_ports') - - elb_policies.append(custom_policy) - - load_balancer = LoadBalancer( - "ELB" + safe_name, - Subnets=[Ref("SubnetA"), Ref("SubnetB"), Ref("SubnetC")], - Listeners=elb['listeners'], - Scheme=elb['scheme'], - ConnectionDrainingPolicy=ConnectionDrainingPolicy( - Enabled=True, - Timeout=120, - ), - Policies=elb_policies - ) - if "health_check" in elb: - load_balancer.HealthCheck = HealthCheck(**elb['health_check']) - - for listener in load_balancer.Listeners: - if listener['Protocol'] == 'HTTPS': - try: - cert_name = elb['certificate_name'] - except KeyError: - raise errors.CfnConfigError( - "HTTPS listener but no certificate_name specified") - try: - self.ssl()[cert_name]['cert'] - self.ssl()[cert_name]['key'] - except KeyError: - raise errors.CfnConfigError( - "Couldn't find ssl cert {0} in config 
file".format(cert_name)) - - listener["SSLCertificateId"] = Join("", [ - "arn:aws:iam::", - Ref("AWS::AccountId"), - ":server-certificate/", - "{0}-{1}".format(cert_name, self.stack_name)] - ) - # if not present, add the default cipher policy - if 'PolicyNames' not in listener: - logging.debug( - "ELB Listener for port 443 has no SSL Policy. " + - "Using default ELBSecurityPolicy-2015-05") - listener['PolicyNames'] = ['PinDownSSLNegotiationPolicy201505'] - """ - # Get all the listeners policy names and setup the policies they refer to - for policy_name in listener.get('PolicyNames', []): - matched_policies = [custom_policy for custom_policy in elb_policies - if custom_policy.PolicyName == policy_name] - assert(len(matched_policies) == 1) - matched_policy = matched_policies[0] - # Get the current ports defined in the troposphere policies config and append - # the listers ports - updated_instance_ports = matched_policy.properties.get('InstancePorts', []) - updated_instance_ports.append("{}".format(listener['InstancePort'])) - matched_policy.properties['InstancePorts'] = updated_instance_ports - updated_instance_ports = matched_policy.properties.get('LoadBalancerPorts', []) - updated_instance_ports.append("{}".format(listener['LoadBalancerPort'])) - matched_policy.properties['LoadBalancerPorts'] = updated_instance_ports - """ - elb_list.append(load_balancer) - - dns_record = RecordSetGroup( - "DNS" + safe_name, - HostedZoneName=elb['hosted_zone'], - Comment="Zone apex alias targeted to ElasticLoadBalancer.", - RecordSets=[ - RecordSet( - "TitleIsIgnoredForThisResource", - Name="%s-%s.%s" % (elb['name'], self.stack_id, elb['hosted_zone']), - Type="A", - AliasTarget=AliasTarget( - GetAtt(load_balancer, "CanonicalHostedZoneNameID"), - GetAtt(load_balancer, "DNSName"), - ), - ), - ] - ) - elb_list.append(dns_record) - - elb_role_policies = PolicyType( - "Policy" + safe_name, - PolicyName=safe_name + "BaseHost", - PolicyDocument={"Statement": [{ - "Action": [ - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer" - ], - "Resource": [ - Join("", [ - "arn:aws:elasticloadbalancing:", - Ref("AWS::Region"), - ":", - Ref("AWS::AccountId"), - ':loadbalancer/', - Ref(load_balancer) - ]) - ], - "Effect": "Allow"} - ]}, - Roles=[Ref("BaseHostRole")], - ) - elb_list.append(elb_role_policies) - - if "security_groups" in elb: - load_balancer.SecurityGroups = [] - for sg_name, sg_rules in elb['security_groups'].items(): - sg = SecurityGroup( - sg_name, - GroupDescription=sg_name, - SecurityGroupIngress=sg_rules, - VpcId=Ref("VPC") - ) - load_balancer.SecurityGroups.append(Ref(sg)) - elb_sgs.append(sg) - else: - sg = SecurityGroup( - "DefaultSG" + safe_name, - GroupDescription="DefaultELBSecurityGroup", - SecurityGroupIngress=[ - { - "IpProtocol": "tcp", - "FromPort": 443, - "ToPort": 443, - "CidrIp": "0.0.0.0/0" - }, - { - "IpProtocol": "tcp", - "FromPort": 80, - "ToPort": 80, - "CidrIp": "0.0.0.0/0" - } - ], - VpcId=Ref("VPC") - ) - load_balancer.SecurityGroups = [Ref(sg)] - elb_sgs.append(sg) - - # Add outputs - output_name = "ELB" + safe_name - logging.debug("config:elb:Adding output to ELB '%s'" % (output_name)) - template.add_output(Output( - output_name, - Description="ELB DNSName", - Value=GetAtt(load_balancer, "DNSName") - )) - - # Update template with ELB resources - map(template.add_resource, elb_list) - map(template.add_resource, elb_sgs) - template = self._attach_elbs(template) - - def _convert_ref_dict_to_objects(self, o): - """ - 
Some troposphere objects need troposphere.Ref objects instead of a - plain dict of {"Ref": "x" }. This helper function will do such - transformations and return a new dict - """ - def ref_fixup(x): - if isinstance(x, dict) and "Ref" in x: - return Ref(x["Ref"]) - else: - return x - return dict([(k, ref_fixup(v)) for k, v in o.items()]) - - def get_ec2_userdata(self): - """ - Build and return the user_data that'll be used for ec2 instances. - This contains a series of required entries, default config, and - and data specified in the template. - """ - os_data = self._get_os_data() - data = self.data['ec2'] - parts = [] - - ami_type = os_data.get('type') - - # Below is the ami flavour specific defaults - if ami_type == 'linux': - parts.append({ - 'content': yaml.dump( - { - 'package_update': True, - 'package_upgrade': True, - 'package_reboot_if_required': True - } - ), - 'mime_type': 'text/cloud-config' - }) - - boothook = self.get_hostname_boothook(data) - - if boothook: - parts.append(boothook) - - if "cloud_config" in data: - parts.append({ - 'content': yaml.dump(data['cloud_config']), - 'mime_type': 'text/cloud-config' - }) - elif boothook: - # If the hostname boothook is specified then make sure we include - # the 'manage_hostname' cloud-init config so that `sudo` doesn't - # complaint about unable to resolve host name - parts.append({ - 'content': yaml.dump({'manage_etc_hosts': True}), - 'mime_type': 'text/cloud-config' - }) - - if len(parts): - return parts - - HOSTNAME_BOOTHOOK_TEMPLATE = textwrap.dedent("""\ - #!/bin/sh - [ -e /etc/cloud/cloud.cfg.d/99_hostname.cfg ] || echo "hostname: {hostname}" > /etc/cloud/cloud.cfg.d/99_hostname.cfg - """) - - DEFAULT_HOSTNAME_PATTERN = "{instance_id}.{environment}.{application}" - - def get_hostname_boothook(self, data): - """ - Return a boothook part that will set the hostname of instances on boot. - - The pattern comes from the ``hostname_pattern`` pattern of data dict, - with a default of "{instance_id}.{environment}.{application}". To - disable this functionality explicitly pass None in this field. - """ - hostname_pattern = data.get('hostname_pattern', self.DEFAULT_HOSTNAME_PATTERN) - if hostname_pattern is None: - return None - - interploations = { - # This gets interploated by cloud-init at run time. - 'instance_id': '${INSTANCE_ID}', - 'tags': data['tags'], - 'environment': self.environment, - 'application': self.application, - 'stack_name': self.stack_name, - } - try: - hostname = hostname_pattern.format(**interploations) - except KeyError as e: - raise errors.CfnHostnamePatternError("Error interpolating hostname_pattern '{pattern}' - {key} is not a valid interpolation".format( - pattern=hostname_pattern, - key=e.args[0])) - - # Warn the user that they probably want to set 'manage_etc_hosts' - if "cloud_config" in data and "manage_etc_hosts" not in data['cloud_config']: - logging.warning( - "config: 'hostname_pattern' boothook is being " + - "generated but 'manage_etc_hosts' has not been specified in " + - "'cloud_config' -- you probably want to specify this as True " + - "otherwise you will get hostname resolution errors." 
- ) - - return { - 'mime_type': 'text/cloud-boothook', - 'content': self.HOSTNAME_BOOTHOOK_TEMPLATE.format(hostname=hostname) - } - - def ec2(self): - # LOAD STACK TEMPLATE - data = self.data['ec2'] - resources = [] - sgs = [] - - for sg_name, ingress in data['security_groups'].items(): - sg = SecurityGroup( - sg_name, - VpcId=Ref("VPC"), - GroupDescription="BaseHost Security Group", - ) - - sgs.append(sg) - resources.append(sg) - - # Because we want to be able to add ingress rules to a security - # group that referes to itself (for example allow all instances in - # the sg to speak to each other on 9300 for Elasticsearch - # clustering) we create the SG in one resource and rules as other - # resources - # - # The yaml for this case is: - # - # security_groups: - # EScluster: - # - FromPort: 9300 - # - ToPort: 9300 - # - SourceSecurityGroupId: { Ref: EScluster } - for idx, rule in enumerate(ingress): - # Convert { Ref: "x"} to Ref("x") - rule = self._convert_ref_dict_to_objects(rule) - - ingress = SecurityGroupIngress( - "{}Rule{}".format(sg_name, idx), - GroupId=Ref(sg), - **rule) - resources.append(ingress) - - devices = [] - try: - for i in data['block_devices']: - device_name = i['DeviceName'] - volume_size = i.get('VolumeSize', 20) - volume_type = i.get('VolumeType', 'standard') - iops = i.get('Iops', None) - # Check we have a permitted volume type - if volume_type not in ['standard', 'gp2', 'io1']: - raise errors.CfnConfigError("config: Volume type '%s' but must be one of standard', 'gp2' or 'io1" - % (volume_type)) - # We need to specifiy iops if we have a volume type of io1 - if volume_type == 'io1' and not iops: - raise errors.CfnConfigError("config: Volume type io1 must have Iops defined") - - # We dont set a default for iops and troposphere doesnt handle this well - if not iops: - ebs = EBSBlockDevice(VolumeType=volume_type, VolumeSize=volume_size) - else: - ebs = EBSBlockDevice(VolumeType=volume_type, VolumeSize=volume_size, Iops=iops) - - devices.append(BlockDeviceMapping( - DeviceName=device_name, - Ebs=ebs - )) - - except KeyError: - devices.append(BlockDeviceMapping( - DeviceName="/dev/sda1", - Ebs=EBSBlockDevice(VolumeSize=20), - )) - - launch_config = LaunchConfiguration( - "BaseHostLaunchConfig", - KeyName=data['parameters']['KeyName'], - SecurityGroups=[Ref(g) for g in sgs], - InstanceType=data['parameters']['InstanceType'], - AssociatePublicIpAddress=True, - IamInstanceProfile=Ref("InstanceProfile"), - ImageId=FindInMap("AWSRegion2AMI", Ref("AWS::Region"), "AMI"), - BlockDeviceMappings=devices, - ) - - user_data = self.get_ec2_userdata() - if user_data: - user_data = mime_packer.pack(user_data) - launch_config.UserData = Base64(user_data) - - resources.append(launch_config) - - # Allow deprecation of tags - ec2_tags = [] - deprecated_tags = ["Env"] - # Add a name tag for easy ec2 instance identification in the AWS console - if data['tags'].get("Name", None) is None: - ec2_tags.append(self._get_default_resource_name_tag(type="ec2")) - # Get all tags from the config - for k, v in data['tags'].items(): - if k not in deprecated_tags: - ec2_tags.append(Tag(k, v, True)) - else: - logging.warning("config: Tag '%s' is deprecated.." 
- % (k)) - - # Setup ASG defaults - auto_scaling_config = data.get('auto_scaling', {}) - asg_min_size = auto_scaling_config.get('min', 1) - asg_max_size = auto_scaling_config.get('max', 5) - asg_desired_size = auto_scaling_config.get('desired', 2) - health_check_type = auto_scaling_config.get('health_check_type', 'EC2').upper() - # The basic EC2 healthcheck has a low grace period need, if we switch to ELB then - # theres a lot more setup to be done before we should attempt a healthcheck - if health_check_type == 'ELB': - default_health_check_grace_period = 600 - else: - default_health_check_grace_period = 300 - health_check_grace_period = auto_scaling_config.get('health_check_grace_period', default_health_check_grace_period) - scaling_group = AutoScalingGroup( - "ScalingGroup", - VPCZoneIdentifier=[Ref("SubnetA"), Ref("SubnetB"), Ref("SubnetC")], - MinSize=asg_min_size, - MaxSize=asg_max_size, - DesiredCapacity=asg_desired_size, - AvailabilityZones=GetAZs(), - Tags=ec2_tags, - LaunchConfigurationName=Ref(launch_config), - HealthCheckGracePeriod=health_check_grace_period, - HealthCheckType=health_check_type, - ) - resources.append(scaling_group) - - return resources - - @classmethod - def _find_resources(cls, template, resource_type): - f = lambda x: x.resource_type == resource_type - return filter(f, template.resources.values()) - - @classmethod - def _get_elb_canonical_name(cls, elb_yaml_name): - return 'ELB-{}'.format(elb_yaml_name.replace('.', '')) - - def _attach_elbs(self, template): - if 'elb' not in self.data: - return template - asgs = self._find_resources(template, - 'AWS::AutoScaling::AutoScalingGroup') - if len(asgs) > 0: - elbs = self._find_resources(template, - 'AWS::ElasticLoadBalancing::LoadBalancer') - asgs[0].LoadBalancerNames = [Ref(x) for x in elbs] - template.resources[asgs[0].title] = asgs[0] - - return template - - def _get_os_data(self): - """ - Get details about the OS from the config data - - Return: - os_data(dict): Dictionary of OS data in the form - { - 'name': 'ubuntu-1404', - 'ami': 'ami-f9a62c8a', - 'region': 'eu-west-1', - 'distribution': 'ubuntu', - 'type': 'linux', - 'release': '20160509.1' - } - - Exceptions: - OSTypeNotFoundError: Raised when the OS in the config file is not - recognised - """ - os_default = 'ubuntu-1404' - available_types = { - 'ubuntu-1404': { - 'name': 'ubuntu-1404', - 'ami': 'ami-f95ef58a', - 'region': 'eu-west-1', - 'distribution': 'ubuntu', - 'type': 'linux', - 'release': '20160509.1' - }, - 'windows2012': { - 'name': 'windows2012', - 'ami': 'ami-8519a9f6', - 'region': 'eu-west-1', - 'distribution': 'windows', - 'type': 'windows', - 'release': '2015.12.31' - } - } - os_choice = self.data['ec2'].get('os', os_default) - if not available_types.get(os_choice, False): - raise errors.OSTypeNotFoundError(self.data['ec2']['os'], available_types.keys()) - os_data = available_types.get(os_choice) - ami = self.data['ec2'].get('ami') - if ami: - logging.info('** Using override AMI of ' + str(ami)) - os_data['ami'] = ami - logging.info('overridden os data is: ' + repr(os_data)) - return os_data - - def _get_default_resource_name_tag(self, type): - """ - Get the name tag we will use for ec2 instances - - Returns: - name_tag(string): The Name: tag to use. 
- type(string): The type of the resource - """ - # Use the stack name as the tag - value = Join("", [{"Ref": "AWS::StackName"}, "-", type]) - name_tag = Tag("Name", value, True) - return name_tag diff --git a/bootstrap_cfn/config.py.rej b/bootstrap_cfn/config.py.rej deleted file mode 100644 index f684869..0000000 --- a/bootstrap_cfn/config.py.rej +++ /dev/null @@ -1,17 +0,0 @@ -*************** -*** 744,750 **** - RecordSets=[ - RecordSet( - "TitleIsIgnoredForThisResource", -- Name="%s.%s" % (elb['name'], elb['hosted_zone']), - Type="A", - AliasTarget=AliasTarget( - GetAtt(load_balancer, "CanonicalHostedZoneNameID"), ---- 744,750 ---- - RecordSets=[ - RecordSet( - "TitleIsIgnoredForThisResource", -+ Name="%s-%s.%s" % (elb['name'], self.stack_id, elb['hosted_zone']), - Type="A", - AliasTarget=AliasTarget( - GetAtt(load_balancer, "CanonicalHostedZoneNameID"), diff --git a/bootstrap_cfn/errors.py b/bootstrap_cfn/errors.py index ce76aa7..755da7f 100644 --- a/bootstrap_cfn/errors.py +++ b/bootstrap_cfn/errors.py @@ -78,27 +78,38 @@ def __init__(self, autoscaling_group, expected_instance_count, instances): .format(expected_instance_count, autoscaling_group, len(instances), instances) ) + class TagRecordExistConflictError(BootstrapCfnError): def __init__(self, stack_tag): msg = ("An {} record already exists. ".format(stack_tag)) super(TagRecordExistConflictError, self).__init__(msg) + class ActiveTagExistConflictError(BootstrapCfnError): def __init__(self, stack_id): - msg = ("An active record already exists in. ".format(stack_id)) + msg = ("An active record already exists in {}. ".format(stack_id)) super(ActiveTagExistConflictError, self).__init__(msg) + class TagRecordNotFoundError(BootstrapCfnError): def __init__(self, tag): msg = ("Could not find a dns record for tag '{}'. ".format(tag)) super(TagRecordNotFoundError, self).__init__(msg) + class PublicELBNotFoundError(BootstrapCfnError): def __init__(self): - msg = ("Could not find a internet facing ELB. ") + msg = "Could not find an internet facing ELB according to cloudformation configuration. " super(PublicELBNotFoundError, self).__init__(msg) + class StackRecordNotFoundError(BootstrapCfnError): def __init__(self, stack_record_name): msg = ("Could not find a dns record for stack '{}'. ".format(stack_record_name)) super(StackRecordNotFoundError, self).__init__(msg) + + +class UpdateDNSRecordError(BootstrapCfnError): + def __init__(self): + msg = "Error updating dns record. 
" + super(UpdateDNSRecordError, self).__init__(msg) diff --git a/bootstrap_cfn/fab_tasks.py b/bootstrap_cfn/fab_tasks.py index b4278ca..721d0c4 100755 --- a/bootstrap_cfn/fab_tasks.py +++ b/bootstrap_cfn/fab_tasks.py @@ -3,7 +3,6 @@ import logging import os import sys -import time import uuid import boto3 @@ -18,10 +17,10 @@ from bootstrap_cfn.cloudformation import Cloudformation from bootstrap_cfn.config import ConfigParser, ProjectConfig from bootstrap_cfn.elb import ELB -from bootstrap_cfn.errors import PublicELBNotFoundError, TagRecordNotFoundError,\ - TagRecordExistConflictError, BootstrapCfnError, CfnConfigError,\ - CloudResourceNotFoundError, DNSRecordNotFoundError, ZoneIDNotFoundError,\ - StackRecordNotFoundError, ActiveTagExistConflictError +from bootstrap_cfn.errors import ActiveTagExistConflictError, BootstrapCfnError,\ + CfnConfigError, CloudResourceNotFoundError, DNSRecordNotFoundError,\ + PublicELBNotFoundError, StackRecordNotFoundError, TagRecordExistConflictError,\ + TagRecordNotFoundError, UpdateDNSRecordError, ZoneIDNotFoundError from bootstrap_cfn.iam import IAM from bootstrap_cfn.r53 import R53 from bootstrap_cfn.utils import tail @@ -417,21 +416,30 @@ def set_stack_name(): "in zone id '{}'...".format(stack_suffix, record, zone_id)) # Let DNS update DNSServerError propogate r53_conn = get_connection(R53) - r53_conn.update_dns_record(zone_id, record, 'TXT', '"{0}"'.format(stack_suffix)) - env.stack_name = "{0}-{1}".format(get_legacy_name(), stack_suffix) + try: + r53_conn.update_dns_record(zone_id, record, 'TXT', '"{0}"'.format(stack_suffix)) + env.stack_name = "{0}-{1}".format(get_legacy_name(), stack_suffix) + except: + raise UpdateDNSRecordError return env.stack_name + def hastag(stack_tag): """ Check if stack_tag is in use - + Args: + stack_tag: the tag of stack + Returns: + String if stack exists + None if not. """ r53_conn = get_connection(R53) zone_id = get_zone_id() - record = "{}".format(get_tag_record_name(stack_tag)) - hasrecord = r53_conn.get_record(get_zone_name(), zone_id, record, 'TXT') + record_name = get_tag_record_name(stack_tag) + hasrecord = r53_conn.get_record(get_zone_name(), zone_id, record_name, 'TXT') return hasrecord + def get_zone_name(): zone_name = get_basic_config().get('master_zone', None) if not zone_name: @@ -439,6 +447,7 @@ def get_zone_name(): logger.info("fab_tasks::get_zone_id: Found master zone '{}' in config...".format(zone_name)) return zone_name + def get_zone_id(): zone_name = get_zone_name() r53_conn = get_connection(R53) @@ -449,13 +458,19 @@ def get_zone_id(): "for zone name '{}'...".format(zone_id, zone_name)) return zone_id + def get_legacy_name(): legacy_name = "{0}-{1}".format(env.application, env.environment) return legacy_name + def get_tag_record_name(stack_tag): """ Returns record name in the format of: stack.[tag].[app]-[env] + Args: + stack_tag: the tag of stack + Returns: + record name like stack.[tag].[app]-[env] """ legacy_name = get_legacy_name() record_name = "stack.{0}.{1}".format(stack_tag, legacy_name) @@ -519,90 +534,82 @@ def cfn_delete(force=False, pre_delete_callbacks=None): kwargs of ``stack_name``, and ``config``. (Python only, not setable from command line) """ + stack_name = get_stack_name() + if not force: + x = raw_input("Are you really sure you want to blow away the whole stack for {}!? 
(y/n)\n".format(stack_name)) + if x not in ['y', 'Y', 'Yes', 'yes']: + sys.exit(1) cfn_config = get_config() cfn = get_connection(Cloudformation) - stack_name = get_stack_name() if pre_delete_callbacks is not None: for callback in pre_delete_callbacks: callback(stack_name=stack_name, config=cfn_config) - print green("\nSTACK {0} DELETING...\n").format(stack_name) r53_conn = get_connection(R53) - if not force: - x = raw_input("Are you really sure you want to blow away the whole stack for {}!? (y/n)\n".format(stack_name)) - if x not in ['y', 'Y', 'Yes', 'yes']: - sys.exit(1) - elb = get_one_public_elbs() + elb = get_first_public_elb() stack_id = stack_name.split('-')[-1] - - if hasattr(env, 'tag') and not env.tag =='active': + zone_name = get_zone_name() + zone_id = r53_conn.get_hosted_zone_id(zone_name) + if hasattr(env, "tag") and env.tag != 'active': + # delete inactive stack stack_tag = env.tag - logger.info("Deleting {} stack {}...".format(stack_tag, stack_name)) - # delete helloworld_12345.dsd.io [Alias record] - - stack_record_name = "{}-{}".format(elb, stack_id) - try: - stack_record_value = r53_conn.get_record(get_zone_name(), get_zone_id(), stack_record_name, 'A') - except StackRecordNotFoundError as e: - raise e(stack_record_name) - if stack_record_value: - record_name = "{}.{}".format(stack_record_name, get_zone_name()) - #stack_record_value = dns.resolver.query(stack_record_name, 'A')[0].to_text().replace('"', "") - r53_conn.delete_dns_record(get_zone_id(), record_name, 'A', stack_record_value, is_alias=True) - record_name = "{}.{}".format(get_tag_record_name(stack_tag), get_zone_name()) - try: - record_value = dns.resolver.query(record_name, 'TXT')[0].to_text().replace('"', "") - except dns.resolver.NXDOMAIN: - raise DNSRecordNotFoundError(record_name) - if record_value: - r53_conn.delete_dns_record(get_zone_id(), record_name, 'TXT', '"{}"'.format(record_value)) + logger.info("Deleting {} inactive stack {}...".format(stack_tag, stack_name)) + print green("\nSTACK {0} DELETING...\n").format(stack_name) + + # delete Alias record + elb_name = "{}-{}".format(elb, stack_id) + alias_record_object = r53_conn.get_full_record(zone_name, zone_id, elb_name, 'A') + if alias_record_object: + alias_record_value = [alias_record_object.alias_hosted_zone_id, + alias_record_object.alias_dns_name, + alias_record_object.alias_evaluate_target_health] + alias_record_name = "{}.{}".format(elb_name, zone_name) + r53_conn.delete_dns_record(zone_id, alias_record_name, 'A', alias_record_value, is_alias=True) + # delete TXT record + txt_tag_record = get_tag_record_name(stack_tag) + txt_record_name = "{}.{}".format(txt_tag_record, zone_name) + txt_record_value = '"{}"'.format(r53_conn.get_record( + zone_name, zone_id, txt_tag_record, 'TXT')) + if txt_record_value: + r53_conn.delete_dns_record(zone_id, txt_record_name, 'TXT', txt_record_value) - #delete vpc dependencies before cfn - cfn.delete(stack_name) # Wait for stacks to delete print 'Waiting for stack to delete.' + cfn.delete(stack_name) + if not env.blocking: + print 'Running in non blocking mode. Exiting.' 
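
For reference, the three-element list handed to delete_dns_record(..., is_alias=True) mirrors the positional arguments of boto's Record.set_alias(). A minimal sketch, with a stand-in for the record object returned by get_full_record() (all values illustrative):

    class FakeRecord(object):
        # stand-in for the boto Record returned by get_full_record()
        alias_hosted_zone_id = 'Z123EXAMPLE'
        alias_dns_name = 'helloworld-12345678.eu-west-1.elb.amazonaws.com'
        alias_evaluate_target_health = False

    rr = FakeRecord()
    alias_record_value = [rr.alias_hosted_zone_id,
                          rr.alias_dns_name,
                          rr.alias_evaluate_target_health]
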
+ sys.exit(0) tail(cfn, stack_name) if cfn.stack_missing(stack_name): print green("Stack successfully deleted") else: - print red("Stack deletion was unsuccessfull") + print red("Stack deletion was unsuccessful") else: - # delete dns records instead of stacks - stack_tag = 'active' - # stack.active.helloworld-dev(.dsd.io) - tag_record_name = get_tag_record_name(stack_tag) - try: - record_value = r53_conn.get_record(get_zone_name(), get_zone_id(), tag_record_name, 'TXT') - except TagRecordNotFoundError as e: - raise e(tag_record_name) + # delete active dns records - record_name = '{}.{}'.format(tag_record_name, get_zone_name()) - record = '"{}"'.format(record_value) + stack_tag = 'active' + print green("\nDELETING ACTIVE DNS RECORDS...\n") - # delete [helloworld.dsd.io] record - main_record_name = "{}.{}".format(elb, get_zone_name()) + # delete 'A' record + main_record_name = "{}.{}".format(elb, zone_name) stack_record_name = "{}-{}".format(elb, stack_id) - try: - stack_record_value = r53_conn.get_record(get_zone_name(), get_zone_id(), stack_record_name, 'A') - except TagRecordNotFoundError as e: - raise e(stack_record_name) - try: - main_record_value = r53_conn.get_record(get_zone_name(), get_zone_id(), elb, 'A') - except TagRecordNotFoundError as e: - raise e(elb) - if stack_record_value and stack_record_value.to_print() == main_record_value.to_print(): - r53_conn.delete_dns_record(get_zone_id(), main_record_name, 'A', main_record_value, is_alias=True) - - if stack_id and stack_id == record.replace('"',""): - # delete [stack.active.helloworld-dev.dsd.io] record - r53_conn.delete_dns_record(get_zone_id(), record_name, 'TXT', record) - # delete cfn stack at last because helloworld-12345.dsd.io Alias record value is needed ^ - - if not env.blocking: - print 'Running in non blocking mode. Exiting.' - sys.exit(0) + stack_record_object = r53_conn.get_full_record(zone_name, zone_id, stack_record_name, 'A') + main_record_object = r53_conn.get_full_record(zone_name, zone_id, elb, 'A') + main_record_value = [main_record_object.alias_hosted_zone_id, + main_record_object.alias_dns_name, + main_record_object.alias_evaluate_target_health] + if stack_record_object and stack_record_object.to_print() == main_record_object.to_print(): + r53_conn.delete_dns_record(zone_id, main_record_name, 'A', main_record_value, is_alias=True) + + # delete 'TXT' record + tag_record_name = get_tag_record_name(stack_tag) + record_value = '"{}"'.format(r53_conn.get_record( + zone_name, zone_id, tag_record_name, 'TXT')) + record_name = '{}.{}'.format(tag_record_name, zone_name) + if stack_id and stack_id == record_value[1:-1]: + r53_conn.delete_dns_record(zone_id, record_name, 'TXT', record_value) if 'ssl' in cfn_config.data: iam = get_connection(IAM) @@ -675,7 +682,6 @@ def update_certs(): cfn_config = get_config() iam = get_connection(IAM) # Upload any SSL certificates to our EC2 instances - updated_count = False if 'ssl' in cfn_config.data: logger.info("Reloading SSL certificates...") updated_count = iam.update_ssl_certificates(cfn_config.ssl(), @@ -793,82 +799,113 @@ def cycle_instances(delay=None): termination_delay = None asg.cycle_instances(termination_delay=termination_delay) + @task -def set_active_stack(tag, force=False): +def set_active_stack(stack_tag, force=False): """ Switch between stacks tagged differently Update 'active' stacks' DNS records to the one specified. 
-
+    Args:
+        stack_tag: the tag of the stack to make active
+        force: if True, switch the active stack without prompting for confirmation
     """
     # helloworld.active.dsd.io
     active_record = get_tag_record_name('active')
     r53_conn = get_connection(R53)
     zone_name = get_zone_name()
     zone_id = get_zone_id()
-    active_stack_id = r53_conn.get_record(zone_name, zone_id, active_record, 'TXT')
-    if has_active_stack() and not force:
-        x = raw_input("The current active stack is {}. Do you wanna change? (y/n)\n".format(active_stack_id))
+
+    tag_record = get_tag_record_name(stack_tag)
+    tag_stack_id = r53_conn.get_record(zone_name, zone_id, tag_record, 'TXT')
+    if not tag_stack_id:
+        raise TagRecordNotFoundError(tag_record)
+
+    if get_active_stack() and not force:
+        x = raw_input("The active stack will be switched to {}. Do you want to continue? (y/n)\n".format(tag_stack_id))
         if x not in ['y', 'Y', 'Yes', 'yes']:
             sys.exit(1)
-    # helloworld.active.dsd.io
-    tag_record = get_tag_record_name(tag)
-    if not tag_record:
-        raise TagRecordNotFoundError(tag)
-    else:
-        tag_stack_id = r53_conn.get_record(zone_name, zone_id, tag_record, 'TXT')
     # update TXT record
-    # Point [helloworld.dsd.io]'s stack_id to [helloworld-tag.dsd.io]'s
-    r53_conn.update_dns_record(zone_id, "{}.{}".format(active_record, get_zone_name()),'TXT', '"{}"'.format(tag_stack_id))
-    logger.info("fab_tasks::set_active_stack: Successfully updated dns alias record")
-
+    try:
+        r53_conn.update_dns_record(zone_id, "{}.{}".format(active_record, get_zone_name()), 'TXT',
+                                   '"{}"'.format(tag_stack_id))
+        logger.info("fab_tasks::set_active_stack: Successfully updated dns TXT record")
+    except Exception:
+        raise UpdateDNSRecordError()
 
-    # get all public facing elbs
-    elb = get_one_public_elbs()
+    # get the first public facing elb
+    elb = get_first_public_elb()
     # helloworld.dsd.io
     main_record_name = "{}.{}".format(elb, zone_name)
     # helloworld-12345.dsd.io
     stack_record_name = "{}-{}.{}".format(elb, tag_stack_id, zone_name)
     # get the ELB value in stack_record_name's record
-    record_value = r53_conn.get_record(zone_name, zone_id, "{}-{}".format(elb, tag_stack_id), 'A')
+    record_name = "{}-{}".format(elb, tag_stack_id)
+    record_object = r53_conn.get_full_record(zone_name, zone_id, record_name, 'A')
+    record_value = [record_object.alias_hosted_zone_id,
+                    record_object.alias_dns_name,
+                    record_object.alias_evaluate_target_health]
     if record_value:
         # point [helloworld.dsd.io] to [helloworld-12345.dsd.io]'s ELB
-        r53_conn.update_dns_record(zone_id, main_record_name,'A', record_value, is_alias=True)
-        logger.info("fab_tasks::set_active_stack: Successfully updated dns alias record")
-        logger.info("Active stack is changed to {}".format(tag_record))
+        try:
+            r53_conn.update_dns_record(zone_id, main_record_name, 'A', record_value, is_alias=True)
+            logger.info("fab_tasks::set_active_stack: Successfully updated dns alias record")
+            print green("Active stack switched to {}".format(tag_record))
+        except Exception:
+            raise UpdateDNSRecordError()
         return True
     else:
         raise StackRecordNotFoundError(stack_record_name)
 
-def has_active_stack():
-    active_record = get_tag_record_name('active')
-    r53_conn = get_connection(R53)
-    zone_name = get_zone_name()
-    zone_id = get_zone_id()
-    active_stack_id = r53_conn.get_record(zone_name, zone_id, active_record, 'TXT')
-    if not active_stack_id:
-        return False
-    elb = get_one_public_elbs()
-    dns_record_name = '{}-{}'.format(elb, active_stack_id)
-    dns_record_value = r53_conn.get_record(zone_name, zone_id, dns_record_name, 'A')
-    main_record_value = r53_conn.get_record(zone_name, zone_id, elb, 'A')
-    if dns_record_value ==
main_record_value: - return True + +@task +def get_active_stack(): + """ + Returns stack id if active stack exists AND Alias record is set appropriately + """ + try: + active_record = get_tag_record_name('active') + r53_conn = get_connection(R53) + zone_name = get_zone_name() + zone_id = get_zone_id() + active_stack_id = r53_conn.get_record(zone_name, zone_id, active_record, 'TXT') + elb = get_first_public_elb() + dns_record_name = '{}-{}'.format(elb, active_stack_id) + dns_record_value = r53_conn.get_record(zone_name, zone_id, dns_record_name, 'A') + main_record_value = r53_conn.get_record(zone_name, zone_id, elb, 'A') + except: + print green("No active stack exists.") + return None + if active_stack_id and dns_record_value and dns_record_value == main_record_value: + logger.info("fab_tasks::get_active_stack: " + "Active stack id is: {}".format(active_stack_id)) + print green("Active stack id is: {}".format(active_stack_id)) + return active_stack_id + else: + print green("No active stack exists.") + return None + def get_all_elbs(): + """ + Returns all internet-facing elbs from cloudformation configuration + """ cfn_config = get_config() - elbs = [ x.get('name') for x in cfn_config.data.get('elb', {}) if x.get('scheme') == 'internet-facing'] + elbs = [x.get('name') for x in cfn_config.data.get('elb', {}) if x.get('scheme') == 'internet-facing'] return elbs -def get_one_public_elbs(): - # elbs: 0? 1? N? + +def get_first_public_elb(): + """ + Returns the first elb if exists. + """ elbs = get_all_elbs() if len(elbs) < 1: raise PublicELBNotFoundError elif len(elbs) == 1: logger.info("fab_tasks::set_active_stack: Found one ELB '{}', " - "using it for public ELB... ".format(elbs[0])) + "using it for public ELB... ".format(elbs[0])) else: logger.info("fab_tasks::set_active_stack: Found multiple ELBs," - "using the first one '{}' as public ELB".format(elbs[0])) - return elbs[0] \ No newline at end of file + "using the first one '{}' as public ELB".format(elbs[0])) + return elbs[0] diff --git a/bootstrap_cfn/fab_tasks.py.orig b/bootstrap_cfn/fab_tasks.py.orig deleted file mode 100755 index 4ad57e9..0000000 --- a/bootstrap_cfn/fab_tasks.py.orig +++ /dev/null @@ -1,704 +0,0 @@ -#!/usr/bin/env python - -import logging -import os -import sys -import time -import uuid - -import boto3 - -import dns.resolver - -from fabric.api import env, task -from fabric.colors import green, red -from fabric.utils import abort - -from bootstrap_cfn.autoscale import Autoscale -from bootstrap_cfn.cloudformation import Cloudformation -from bootstrap_cfn.config import ConfigParser, ProjectConfig -from bootstrap_cfn.elb import ELB -from bootstrap_cfn.errors import BootstrapCfnError, CfnConfigError, CloudResourceNotFoundError, DNSRecordNotFoundError, ZoneIDNotFoundError -from bootstrap_cfn.iam import IAM -from bootstrap_cfn.r53 import R53 -from bootstrap_cfn.utils import tail -from bootstrap_cfn.vpc import VPC - - -# Default fab config. Set via the tasks below or --set -env.setdefault('application') -env.setdefault('environment') -env.setdefault('aws') -env.setdefault('config') -env.setdefault('stack_passwords') -env.setdefault('blocking', True) -env.setdefault('aws_region', 'eu-west-1') - -# GLOBAL VARIABLES -TIMEOUT = 3600 -RETRY_INTERVAL = 10 - -# This is needed because pkgutil wont pick up modules -# imported in a fabfile. 
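
The env.setdefault() calls above only seed defaults; anything supplied through the setter tasks or fab's --set flag is already present in env and survives them. A condensed sketch (values illustrative):

    from fabric.api import env

    # fab --set blocking=false,aws_region=eu-west-2 ... pre-populates env,
    # so these setdefault calls leave user-supplied values untouched.
    env.setdefault('blocking', True)
    env.setdefault('aws_region', 'eu-west-1')
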
-path = env.real_fabfile or os.getcwd() -sys.path.append(os.path.dirname(path)) - -# Set up the logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("bootstrap-cfn") -logging.getLogger("requests").setLevel(logging.WARNING) - - -@task -def aws(profile_name): - """ - Set the AWS account to use - - Sets the environment variable 'aws' to the name of the - account to use in the AWS config file (~/.aws/credentials.yaml) - - Args: - profile_name(string): The string to set the environment - variable to - """ - env.aws = str(profile_name).lower() - # Setup boto so we actually use this environment - boto3.setup_default_session(profile_name=env.aws, - region_name=env.aws_region) - - -@task -def environment(environment_name): - """ - Set the environment section to be read from the project config - file - - Sets the environment variable 'environment'. - The named section will be read from the project's YAML file - - Args: - environment_name(string): The string to set the - variable to - """ - env.environment = str(environment_name).lower() - - -@task -def application(application_name): - """ - Set the application name - - Sets the environment variable 'application' to - an application name. Which is just a name to - associate with Cloudformation stack - - Args: - application_name(string): The string to set the - variable to - """ - env.application = str(application_name).lower() - - -@task -def tag(tag): - """ - Set a tag for the stack - - Sets the environment variable 'tag' - This gets used to store a DNS entry to identify - multiple stacks with the same name. - e.g. you can tag a stack as active, or inactive, - green or blue etc. - - Args: - tag(string): The string to set the - variable to - """ - env.tag = str(tag).lower() - - -@task -def config(config_file): - """ - Set the location of the project's YAML file - - Sets the environment variable 'config' to be - the location of the project's YAML config - file - - Args: - config_file(string): The string to set the - variable to - """ - env.config = str(config_file).lower() - - -@task -def passwords(passwords_file): - """ - Set the path to the project's password YAML config file - - Set the environment variable 'stack_passwords' to the - path of the project's password file. This will be used - to load in a dictionary of passwords to use with the - project's components - - Args: - passwords_file(string): The string to set the - variable to - """ - env.stack_passwords = str(passwords_file).lower() - - -@task -def blocking(block): - """ - Set to block while waiting for stack creation or deletion to complete - - Sets the environment variable 'blocking' to True to wait on stack - creation or deletion to complete before returning from the script. - If false the cloudformation task will be started and the script - will immediately exit - - Args: - block(string): The string to set the - variable to. Must be one of yes, true, - t or 1 - """ - env.blocking = str(block).lower() in ("yes", "true", "t", "1") - - -@task -def user(username): - """ - Sets the username to use for ssh to created instances - - Sets the environment variable 'user' to the ssh username - to use when trying to connect to a remote instance - - Args: - username(string): The string to set the - variable to. 
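
These setter tasks do nothing beyond populating fabric's env; an equivalent inline sketch with placeholder values:

    from fabric.api import env

    env.aws = 'dev'                 # aws:dev
    env.application = 'helloworld'  # application:helloworld
    env.environment = 'dev'         # environment:dev
    env.tag = 'blue'                # tag:blue (optional; reads default to 'active')
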
- """ - env.user = username - - - -def apply_maintenance_criteria(elb): - ''' - Applies maintenance criteria to elb - - Returns True if the maintenance should continue - ''' - return elb['scheme'] == 'internet-facing' - -@task -def swap_tags(tag1, tag2): - """ - Swap two tagged stacks. - - i.e. update the DNS text record which defines the - random suffix associated with a stack tag. - """ - cfn_config = get_config() - r53_conn = get_connection(R53) - zone_name = cfn_config.data['master_zone'] - zone_id = r53_conn.get_hosted_zone_id(zone_name) - legacy_name = "{0}-{1}".format(env.application, env.environment) - record1 = "stack.{0}.{1}".format(tag1, legacy_name) - record2 = "stack.{0}.{1}".format(tag2, legacy_name) - stack_suffix1 = r53_conn.get_record(zone_name, zone_id, record1, 'TXT') - stack_suffix2 = r53_conn.get_record(zone_name, zone_id, record2, 'TXT') - fqdn1 = "{0}.{1}".format(record1, zone_name) - fqdn2 = "{0}.{1}".format(record2, zone_name) - r53_conn.update_dns_record(zone_id, fqdn1, 'TXT', '"{0}"'.format(stack_suffix2)) - r53_conn.update_dns_record(zone_id, fqdn2, 'TXT', '"{0}"'.format(stack_suffix1)) - -@task -def enter_maintenance(maintenance_ip, dry_run=False): - ''' - Puts stack into maintenance mode - - Sets all internet facing elb hostnames to resolve to given maintenance_ip - ''' - cfn_config = get_config() - r53_conn = get_connection(R53) - - cached_zone_ids = {} - for elb in cfn_config.data['elb']: - if not apply_maintenance_criteria(elb): - continue - - record = "{name}.{hosted_zone}".format(**elb) - zone_id = get_cached_zone_id(r53_conn, cached_zone_ids, elb['hosted_zone']) - print green("Attempting to update: \"{0}\":\"{1}\"".format(record, maintenance_ip)) - r53_conn.update_dns_record(zone_id, record, 'A', maintenance_ip, dry_run=dry_run) - - -@task -def exit_maintenance(dry_run=False): - """ - Exit maintenance mode - - Sets internet-facing elbs hostnames - back to the ELB DNS alias - """ - r53_conn = get_connection(R53) - elb_conn = get_connection(ELB) - - cfn_config = get_config() - stack_name = get_stack_name() - - # In order to traverse from config yaml all the way to the DNS alias for the ELB - # it is required to construct a logical to physical naming for the elbs. 
So first - # get all elbs for this stack from AWS cloudformation, to be used as a - # filter on the next step - # Note: if stack does not exist this will throw a BotoServerError - stack_elbs = dict([ - (x.get('logical_resource_id', x.get('LogicalResourceId', None)), - x.get('physical_resource_id', x.get('PhysicalResourceId', None))) - for x in elb_conn.cfn.get_stack_load_balancers(stack_name)]) - if None in stack_elbs.keys(): - raise BootstrapCfnError( - "Unable to retrieve logical resource IDs for a stack load balancer.\n" - "ELB Dict: ".format(stack_elbs)) - if None in stack_elbs.values(): - raise BootstrapCfnError( - "Unable to retrieve physical resource IDs for a stack load balancer.\n" - "ELB Dict: ".format(stack_elbs)) - - # filter stack related load balancers (as opposed to all stack elbs in the account) - full_load_balancers = elb_conn.conn_elb.get_all_load_balancers( - load_balancer_names=stack_elbs.values()) - - cached_zone_ids = {} - # loop through elb config entries and change internet facing ones - for elb in cfn_config.data['elb']: - if not apply_maintenance_criteria(elb): - continue - record = "{name}.{hosted_zone}".format(**elb) - # obtain physical name from dict lookup, by converting elb name into safe name - # into logical name - phys_name = stack_elbs[mold_to_safe_elb_name(elb['name'])] - - dns_name = [x.dns_name for x in full_load_balancers if x.name == phys_name] - if len(dns_name) == 1: - dns_name = dns_name[0] - else: - raise BootstrapCfnError( - "Lookup for elb with physical name \"{0}\" returned {1} load balancers, " - "while only exactly 1 was expected".format(phys_name, len(dns_name))) - zone_id = get_cached_zone_id(r53_conn, cached_zone_ids, elb['hosted_zone']) - - # For record_value provide list of params as needed by function set_alias - # http://boto.readthedocs.org/en/latest/ref/route53.html#boto.route53.record.Record.set_alias - record_value = [ - # alias_hosted_zone_id - R53.AWS_ELB_ZONE_ID[env.aws_region], - # alias_dns_name - dns_name, - # alias_evaluate_target_health (True/False) - False - ] - print green("Attempting to update: \"{0}\":{1}".format(record, record_value)) - r53_conn.update_dns_record(zone_id, record, 'A', record_value, is_alias=True, dry_run=dry_run) - - -def get_cached_zone_id(r53_conn, zone_dict, zone_name): - ''' - Gets and cache zone id from route53 - - If we are looping through ELBs we may just have different hostnames in same zone, - so feel free to cache it (and drink a shot because I said 'cache') - - raises CloudResourceNotFoundError if zone is not found - ''' - if zone_name not in zone_dict: - # not found, look it up, cache it up .. - lookup_zone = r53_conn.get_hosted_zone_id(zone_name) - if not lookup_zone: - raise CloudResourceNotFoundError("Zone ID not found for zone: {}".format(zone_name)) - zone_dict[zone_name] = lookup_zone - return zone_dict[zone_name] - - -def mold_to_safe_elb_name(elb_name): - ''' - Molds the elb_name to match cloudformation naming of ELBs - ''' - return 'ELB' + elb_name.replace('-', '').replace('.', '').replace('_', '') - - -def get_stack_name(new=False): - """ - Get the name of the stack - - The name of the stack is a combination - of the application and environment names - and a randomly generated suffix. - - The env.tag dictates which randomly generated suffix - the default env.tag is 'active' - - If new=True we generate a new stack_name and create the - dns records to retreive it in the future. 
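
The naming scheme that get_stack_name() relies on can be traced end to end; a worked sketch with placeholder application, environment, zone and suffix values:

    legacy_name = "{0}-{1}".format('helloworld', 'dev')          # helloworld-dev
    record_name = "stack.{0}.{1}".format('active', legacy_name)  # stack.active.helloworld-dev
    dns_name = "{0}.{1}".format(record_name, 'dsd.io')           # stack.active.helloworld-dev.dsd.io
    stack_suffix = '12345678'                                    # the TXT value stored at dns_name
    stack_name = "{0}-{1}".format(legacy_name, stack_suffix)     # helloworld-dev-12345678
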
- - """ - if new: - # For back-compatibility - set_stack_name() - - if hasattr(env, 'tag'): - stack_tag = env.tag - else: - stack_tag = 'active' - env.tag = stack_tag - if not hasattr(env, 'stack_name'): - legacy_name = "{0}-{1}".format(env.application, env.environment) - # get_config needs a stack_name so this is a hack because we don't - # know it yet... - env.stack_name = 'temp' - zone_name = get_basic_config().get('master_zone', None) - if not zone_name: - raise CfnConfigError("No master_zone in yaml, unable to create/find DNS records for stack name") - logger.info("fab_tasks::get_stack_name: Found master zone '{}' in config...".format(zone_name)) - - record_name = "stack.{0}.{1}".format(stack_tag, legacy_name) - dns_name = "{}.{}".format(record_name, zone_name) - try: - stack_suffix = dns.resolver.query(dns_name, 'TXT')[0].to_text().replace('"', "") - logger.info("fab_tasks::get_stack_name: Found stack suffix '{}' " - "for dns record '{}'... ".format(stack_suffix, dns_name)) - env.stack_name = "{0}-{1}".format(legacy_name, stack_suffix) - logger.info("fab_tasks::get_stack_name: Found stack name '{}'...".format(env.stack_name)) - except dns.resolver.NXDOMAIN: - raise DNSRecordNotFoundError(zone_name) - - return env.stack_name - - -def set_stack_name(): - """ - Set the name of the stack - - The name of the stack is a combination - of the application and environment names - and a randomly generated suffix. - - The env.tag dictates which randomly generated suffix - tag='active' should be saved as key to switch stacks. - - We generate a new stack_name and create the - dns records to retreive it in the future. - - """ - env.stack_name = 'temp' - cfn_config = get_config() - if not hasattr(env, 'tag'): - stack_tag = cfn_config.stack_id - else: - stack_tag = env.tag - if env.tag=='active': - raise CfnConfigError("The value of tag cannot be 'active'") - env.tag = stack_tag - legacy_name = "{0}-{1}".format(env.application, env.environment) - # get_config needs a stack_name so this is a hack because we don't - # know it yet... 
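
Route53 stores TXT values wrapped in literal double quotes, which is why the suffix is quoted on the way in and stripped again on the way out elsewhere in this file. In miniature:

    stack_suffix = '12345678'
    txt_value = '"{0}"'.format(stack_suffix)   # value as written via update_dns_record
    assert txt_value[1:-1] == stack_suffix     # value as read back and unquoted
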
- r53_conn = get_connection(R53) - zone_name = cfn_config.data.get('master_zone', None) - if not zone_name: - raise CfnConfigError("No master_zone in yaml, unable to create/find DNS records for stack name") - logger.info("fab_tasks::set_stack_name: Found master zone '{}' in config...".format(zone_name)) - - zone_id = r53_conn.get_hosted_zone_id(zone_name) - if not zone_id: - raise ZoneIDNotFoundError(zone_name) - logger.info("fab_tasks::set_stack_name: Found zone id '{}' " - "for zone name '{}'...".format(zone_id, zone_name)) - record_name = "stack.{0}.{1}".format(stack_tag, legacy_name) - - stack_suffix = cfn_config.stack_id - record = "{0}.{1}".format(record_name, zone_name) - logger.info("fab_tasks::set_stack_name: " - "Creating stack suffix {} " - "for record '{}' " - "in zone id '{}'...".format(stack_suffix, record, zone_id)) - # Let DNS update DNSServerError propogate - r53_conn.update_dns_record(zone_id, record, 'TXT', '"{0}"'.format(stack_suffix)) - env.stack_name = "{0}-{1}".format(legacy_name, stack_suffix) - return env.stack_name - - -def _validate_fabric_env(): - if env.aws is None: - sys.exit("\n[ERROR] Please specify an AWS account, e.g 'aws:dev'") - if env.environment is None: - sys.exit("\n[ERROR] Please specify an environment, e.g 'environment:dev'") - if env.application is None: - sys.exit("\n[ERROR] Please specify an application, e.g 'application:peoplefinder'") - if env.config is None: - sys.exit("\n[ERROR] Please specify a config file, e.g 'config:/tmp/sample-application.yaml'") - elif not os.path.isfile(env.config): - sys.exit("\n[ERROR] Config file %s does not exist" % str(env.config)) - - if env.stack_passwords is not None and not os.path.exists(env.stack_passwords): - print >> sys.stderr, "\n[ERROR] Passwords file '{0}' doesn't exist!".format(env.stack_passwords) - sys.exit(1) - - -def get_basic_config(): - """ - Returns the basic unparsed configuration file for the project - """ - _validate_fabric_env() - project_config = ProjectConfig( - env.config, - env.environment, - passwords=env.stack_passwords) - return project_config.config - - -def get_config(): - Parser = env.get('cloudformation_parser', ConfigParser) - cfn_config = Parser(get_basic_config(), get_stack_name(), environment=env.environment, application=env.application) - return cfn_config - - -def get_connection(klass): - _validate_fabric_env() - return klass(env.aws, env.aws_region) - - -@task -def cfn_delete(force=False, pre_delete_callbacks=None): - """ - Delete the AWS Cloudformation stack - - Deletes the stack and the associated SSL certificates - - Args: - force(bool): True to destroy the stack without any further - input, False to require confirmation before deletion - pre_delete_callbacks(list of callables): callable to invoke before - trying to run the DeleteStack call. Each callback is called with - kwargs of ``stack_name``, and ``config``. (Python only, not setable from - command line) - """ - stack_name = get_stack_name() - if not force: - x = raw_input("Are you really sure you want to blow away the whole stack for {}!? (y/n)\n".format(stack_name)) - if x not in ['y', 'Y', 'Yes', 'yes']: - sys.exit(1) - cfn_config = get_config() - cfn = get_connection(Cloudformation) - - if pre_delete_callbacks is not None: - for callback in pre_delete_callbacks: - callback(stack_name=stack_name, config=cfn_config) - - print green("\nSTACK {0} DELETING...\n").format(stack_name) - - cfn.delete(stack_name) - - if not env.blocking: - print 'Running in non blocking mode. Exiting.' 
- sys.exit(0) - - # Wait for stacks to delete - print 'Waiting for stack to delete.' - - tail(cfn, stack_name) - - if cfn.stack_missing(stack_name): - print green("Stack successfully deleted") - else: - print red("Stack deletion was unsuccessfull") - - if 'ssl' in cfn_config.data: - iam = get_connection(IAM) - iam.delete_ssl_certificate(cfn_config.ssl(), stack_name) - - -@task -def cfn_create(test=False): - """ - Create the AWS cloudformation stack. - - Using the configuration files, a full cloudformation - specification will be generated and used to create a - stack on AWS. - """ - stack_name = get_stack_name(new=True) - cfn_config = get_config() - - cfn = get_connection(Cloudformation) - if test: - print cfn_config.process() - return - # Upload any SSL certs that we may need for the stack. - if 'ssl' in cfn_config.data: - print green("Uploading SSL certificates to stack") - iam = get_connection(IAM) - iam.upload_ssl_certificate(cfn_config.ssl(), stack_name) - # Useful for debug - # print cfn_config.process() - # Inject security groups in stack template and create stacks. - try: - stack = cfn.create(stack_name, cfn_config.process(), tags=get_cloudformation_tags()) - except: - # cleanup ssl certificates if any - if 'ssl' in cfn_config.data: - print red("Deleting SSL certificates from stack") - iam.delete_ssl_certificate(cfn_config.ssl(), stack_name) - import traceback - abort(red("Failed to create: {error}".format(error=traceback.format_exc()))) - - print green("\nSTACK {0} CREATING...\n").format(stack_name) - - if not env.blocking: - print 'Running in non blocking mode. Exiting.' - sys.exit(0) - - tail(cfn, stack_name) - stack_evt = cfn.get_last_stack_event(stack) - - if stack_evt.resource_status == 'CREATE_COMPLETE': - print 'Successfully built stack {0}.'.format(stack) - else: - # So delete the SSL cert that we uploaded - if 'ssl' in cfn_config.data: - iam.delete_ssl_certificate(cfn_config.ssl(), stack_name) - abort('Failed to create stack: {0}'.format(stack)) - - -@task -def update_certs(): - """ - Update the ssl certificates - - This will read in the certificates from the config - file, update them in AWS Iam, and then also handle - setting the certificates on ELB's - """ - - stack_name = get_stack_name() - cfn_config = get_config() - iam = get_connection(IAM) - # Upload any SSL certificates to our EC2 instances - updated_count = False - if 'ssl' in cfn_config.data: - logger.info("Reloading SSL certificates...") - updated_count = iam.update_ssl_certificates(cfn_config.ssl(), - stack_name) - else: - logger.error("No ssl section found in cloud config file, aborting...") - sys.exit(1) - - # Set the certificates on ELB's if we have any - if updated_count: - if 'elb' in cfn_config.data: - logger.info("Setting load balancer certificates...") - elb = get_connection(ELB) - replaced_certs = elb.set_ssl_certificates(updated_count, - stack_name, - max_retries=3, - retry_delay=10) - for cert_name in replaced_certs: - logger.info("Deleting replaced certificate '%s'..." - % (cert_name)) - iam.delete_certificate(cert_name, - stack_name, - max_retries=3, - retry_delay=10) - else: - logger.error("No certificates updated so skipping " - "ELB certificate update...") - - -def get_cloudformation_tags(): - """ - Get a top-level set of tags for the stack, these will propagate - down so that many of the created resources will be tagged in - addition. 
Notable omissions are EBS volumes and route53 resources - """ - return { - "Env": env.environment, - "Application": env.application - } - - -@task -def display_elb_dns_entries(): - """ - Prints out the ELB name(s) and the corresponding DNS name(s) for every ELB - in the environment provided. - """ - stack_name = get_stack_name() - elb = get_connection(ELB) - elb_dns_list = elb.list_domain_names(stack_name) - for elb_dns in elb_dns_list: - print "\n\nELB name: {0} DNS: {1}".format(elb_dns['elb_name'], elb_dns['dns_name']) - - -@task -def enable_vpc_peering(): - """ - Enables vpc peering to stacks named in the cloudformation config. - """ - # peer vpc - cfg = get_config() - vpc_cfg = cfg.data.get('vpc', False) - if vpc_cfg: - vpc_obj = VPC(cfg.data, get_stack_name()) - vpc_obj.enable_peering() - - -@task -def disable_vpc_peering(): - """ - Disables vpc peering to stacks named in the cloudformation config. - """ - # peer vpc - cfg = get_config() - vpc_cfg = cfg.data.get('vpc', False) - if vpc_cfg: - vpc_obj = VPC(cfg.data, get_stack_name()) - vpc_obj.disable_peering() - - -@task -def set_autoscaling_desired_capacity(capacity, block=True): - """ - Set the desired capacity the autoscaling group - - Args: - capacity(int): Number of instances desired in - the autoscaling group. - block(bool): Wait for instances to become healthy - and in-service. - """ - asg = get_connection(Autoscale) - if not asg.group: - asg.set_autoscaling_group(get_stack_name()) - asg.set_autoscaling_desired_capacity(capacity=int(capacity)) - if block: - asg.wait_for_instances(int(capacity)) - - -@task -def cycle_instances(delay=None): - """ - Cycle the instances in the autoscaling group - - Args: - delay(int): Number of seconds between new instance - becoming healthy and killing the old one. - """ - asg = get_connection(Autoscale) - if not asg.group: - asg.set_autoscaling_group(get_stack_name()) - if delay: - termination_delay = int(delay) - else: - termination_delay = None - asg.cycle_instances(termination_delay=termination_delay) diff --git a/bootstrap_cfn/fab_tasks.py.rej b/bootstrap_cfn/fab_tasks.py.rej deleted file mode 100644 index c46d120..0000000 --- a/bootstrap_cfn/fab_tasks.py.rej +++ /dev/null @@ -1,59 +0,0 @@ -*************** -*** 393,410 **** - dns records to retreive it in the future. - - """ - if hasattr(env, 'tag'): -- stack_tag = env.tag - else: -- stack_tag = 'active' - env.tag = stack_tag - legacy_name = "{0}-{1}".format(env.application, env.environment) - # get_config needs a stack_name so this is a hack because we don't - # know it yet... - env.stack_name = 'temp' -- cfn_config = get_config() - r53_conn = get_connection(R53) -- zone_name = cfn_config.data.get('master_zone', None) - if not zone_name: - raise CfnConfigError("No master_zone in yaml, unable to create/find DNS records for stack name") - logger.info("fab_tasks::set_stack_name: Found master zone '{}' in config...".format(zone_name)) ---- 393,414 ---- - dns records to retreive it in the future. - - """ -+ stack_suffix = uuid.uuid4().__str__()[-8:] - if hasattr(env, 'tag'): -+ # should check if the tag is used already -+ if env.tag == 'active': -+ raise CfnConfigError("tag cannot be 'active'") -+ else: -+ stack_tag = env.tag - else: -+ stack_tag = stack_suffix - env.tag = stack_tag - legacy_name = "{0}-{1}".format(env.application, env.environment) - # get_config needs a stack_name so this is a hack because we don't - # know it yet... 
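
The suffix itself is nothing more than the tail of a random UUID; a minimal sketch:

    import uuid

    stack_suffix = str(uuid.uuid4())[-8:]   # last eight hex characters, e.g. '1a2b3c4d'
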
- env.stack_name = 'temp' - r53_conn = get_connection(R53) -+ zone_name = get_basic_config().get('master_zone', None) - if not zone_name: - raise CfnConfigError("No master_zone in yaml, unable to create/find DNS records for stack name") - logger.info("fab_tasks::set_stack_name: Found master zone '{}' in config...".format(zone_name)) -*************** -*** 416,422 **** - "for zone name '{}'...".format(zone_id, zone_name)) - record_name = "stack.{0}.{1}".format(stack_tag, legacy_name) - -- stack_suffix = uuid.uuid4().__str__()[-8:] - record = "{0}.{1}".format(record_name, zone_name) - logger.info("fab_tasks::set_stack_name: " - "Creating stack suffix {} " ---- 420,425 ---- - "for zone name '{}'...".format(zone_id, zone_name)) - record_name = "stack.{0}.{1}".format(stack_tag, legacy_name) - - record = "{0}.{1}".format(record_name, zone_name) - logger.info("fab_tasks::set_stack_name: " - "Creating stack suffix {} " diff --git a/bootstrap_cfn/r53.py b/bootstrap_cfn/r53.py index 856ebf5..84719e4 100644 --- a/bootstrap_cfn/r53.py +++ b/bootstrap_cfn/r53.py @@ -30,39 +30,37 @@ def __init__(self, aws_profile_name, aws_region_name='eu-west-1'): self.conn_r53 = utils.connect_to_aws(boto.route53, self) def get_hosted_zone_id(self, zone_name): - ''' - Take a zone name - Return a zone id or None if no zone found - ''' + """ + Args: + zone_name + Returns: + a zone id or None if no zone found + """ zone = self.conn_r53.get_hosted_zone_by_name(zone_name) if zone: zone = zone['GetHostedZoneResponse']['HostedZone']['Id'] return zone.replace('/hostedzone/', '') def update_dns_record(self, zone, record, record_type, record_value, is_alias=False, dry_run=False): - ''' + """ Updates a dns record in route53 - - zone -- a string specifying the zone id - record -- a string for the record to update - record_value -- a string if it is not an alias - a list, if it is an alias, of parameters to pass to - boto record.set_alias() function - is_alias -- a boolean to show if record_value is an alias - record_type -- a string to specify the record, eg "A" - - + Args: + zone: a string specifying the zone id + record: a string for the record to update + record_value: a string if it is not an alias + a list, if it is an alias, of parameters to pass to + boto record.set_alias() function + is_alias: a boolean to show if record_value is an alias + record_type: a string to specify the record, eg "A" + dry_run: Returns True if update successful or raises an exception if not - ''' + """ changes = boto.route53.record.ResourceRecordSets(self.conn_r53, zone) change = changes.add_change("UPSERT", record, record_type, ttl=60) if is_alias: # provide list of params as needed by function set_alias # http://boto.readthedocs.org/en/latest/ref/route53.html#boto.route53.record.Record.set_alias - alias_hosted_zone_id = record_value.alias_hosted_zone_id - alias_dns_name = record_value.alias_dns_name - alias_evaluate_target_health = record_value.alias_evaluate_target_health - change.set_alias(alias_hosted_zone_id, alias_dns_name, alias_evaluate_target_health) + change.set_alias(*record_value) else: change.add_value(record_value) if dry_run: @@ -71,26 +69,27 @@ def update_dns_record(self, zone, record, record_type, record_value, is_alias=Fa changes.commit() return True - def delete_dns_record(self, zone, record, record_type, record_value, is_alias=False, dry_run=False): - ''' + def delete_dns_record(self, zone_id, record_name, record_type, record_value, is_alias=False, dry_run=False): + """ Delete a dns record in route53 - - zone -- a string 
specifying the zone id - record -- a string for the record to update - record_type -- a string to specify the record, eg "A" - - - Returns True if update successful or raises an exception if not - ''' - changes = boto.route53.record.ResourceRecordSets(self.conn_r53, zone) - change = changes.add_change("DELETE", record, record_type, ttl=60) + Args: + zone_id: a string specifying the zone id + record_name: a string for the record to update + record_value: a string if it is not an alias + a list, if it is an alias, of parameters to pass to + boto record.set_alias() function + record_type: a string to specify the record, eg "A" + is_alias: + dry_run: + Returns: + True if update successful or raises an exception if not + """ + changes = boto.route53.record.ResourceRecordSets(self.conn_r53, zone_id) + change = changes.add_change("DELETE", record_name, record_type, ttl=60) if is_alias: # provide list of params as needed by function set_alias # http://boto.readthedocs.org/en/latest/ref/route53.html#boto.route53.record.Record.set_alias - alias_hosted_zone_id = record_value.alias_hosted_zone_id - alias_dns_name = record_value.alias_dns_name - alias_evaluate_target_health = record_value.alias_evaluate_target_health - change.set_alias(alias_hosted_zone_id, alias_dns_name, alias_evaluate_target_health) + change.set_alias(*record_value) else: change.add_value(record_value) if dry_run: @@ -99,16 +98,43 @@ def delete_dns_record(self, zone, record, record_type, record_value, is_alias=Fa res = changes.commit() return res - def get_record(self, zone, zone_id, record, record_type): - ''' - ''' - fqdn = "{0}.{1}.".format(record, zone) - rrsets = self.conn_r53.get_all_rrsets(zone_id, type=record_type, name=fqdn) + def get_record(self, zone_name, zone_id, record_name, record_type): + """ + + Args: + zone_name: + zone_id: + record_name(String): + record_type: + Returns: + String or None, in the event of there being no A or TXT record + """ + record_fqdn = "{0}.{1}.".format(record_name, zone_name) + rrsets = self.conn_r53.get_all_rrsets(zone_id, type=record_type, name=record_fqdn) for rr in rrsets: - if rr.type == record_type and rr.name == fqdn: + if rr.type == record_type and rr.name == record_fqdn: if rr.type == 'TXT': rr.resource_records[0] = rr.resource_records[0][1:-1] if rr.type == 'A': - return rr + if rr.alias_dns_name: + return rr.alias_dns_name return rr.resource_records[0] return None + + def get_full_record(self, zone_name, zone_id, record_name, record_type): + """ + Args: + zone_name: + zone_id: + record_name: + record_type: + + Returns: + RecordObject + """ + record_fqdn = "{0}.{1}.".format(record_name, zone_name) + rrsets = self.conn_r53.get_all_rrsets(zone_id, type=record_type, name=record_fqdn) + for rr in rrsets: + if rr.type == record_type and rr.name == record_fqdn: + return rr + return None diff --git a/tests/test_r53.py b/tests/test_r53.py index d9a1ceb..d9bf7d7 100644 --- a/tests/test_r53.py +++ b/tests/test_r53.py @@ -48,8 +48,8 @@ def test_get_record(self): r53_mock = mock.Mock() r53_connect_result = mock.Mock(name='cf_connect') r53_mock.return_value = r53_connect_result - m = mock.Mock(resource_records=['1.1.1.1']) - m.name = 'blah.dsd.io.' + m = mock.Mock(alias_dns_name='dnsname') + m.name = 'recordname.dsd.io.' 
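
With the new alias branch, get_record() answers with the alias target for aliased A records rather than the raw resource record. The behaviour the revised test pins down, shown in isolation with a stand-in record object (names are the test's own):

    class FakeRR(object):
        type = 'A'
        name = 'recordname.dsd.io.'
        alias_dns_name = 'dnsname'
        resource_records = ['1.1.1.1']

    rr = FakeRR()
    # mirrors the new branch in get_record()
    value = rr.alias_dns_name if rr.alias_dns_name else rr.resource_records[0]
    assert value == 'dnsname'
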
m.type = 'A' response = [m] @@ -57,8 +57,8 @@ def test_get_record(self): r53_connect_result.configure_mock(**mock_config) boto.route53.connect_to_region = r53_mock r = r53.R53(self.env.aws_profile) - x = r.get_record('dsd.io', 'ASDAKSLDK', 'blah', 'A') - self.assertEquals(x, '1.1.1.1') + x = r.get_record('dsd.io', 'ASDAKSLDK', 'recordname', 'A') + self.assertEquals(x, 'dnsname') def test_get_TXT_record(self): r53_mock = mock.Mock() diff --git a/tests/tests.py b/tests/tests.py index 610f362..19bdc80 100755 --- a/tests/tests.py +++ b/tests/tests.py @@ -620,7 +620,7 @@ def test_elb(self): "CanonicalHostedZoneNameID"), "DNSName": GetAtt("ELBtestdevexternal", "DNSName") }, - "Name": "test-dev-external.kyrtest.pf.dsd.io." + "Name": "test-dev-external-name.kyrtest.pf.dsd.io." } ], ) @@ -637,7 +637,7 @@ def test_elb(self): GetAtt(lb, "CanonicalHostedZoneNameID"), "DNSName": GetAtt(lb, "DNSName") }, - "Name": "test-dev-internal.kyrtest.pf.dsd.io." + "Name": "test-dev-internal-name.kyrtest.pf.dsd.io." } ], ) @@ -1058,7 +1058,7 @@ def test_elb_with_ssl(self): "CanonicalHostedZoneNameID"), "DNSName": GetAtt("ELBdockerregistryservice", "DNSName") }, - "Name": "docker-registry.service.kyrtest.foo.bar." + "Name": "docker-registry.service-name.kyrtest.foo.bar." } ], ) @@ -1175,7 +1175,7 @@ def test_elb_with_healthcheck(self): "CanonicalHostedZoneNameID"), "DNSName": GetAtt("ELBdockerregistryservice", "DNSName") }, - "Name": "docker-registry.service.kyrtest.foo.bar." + "Name": "docker-registry.service-name.kyrtest.foo.bar." } ], ) @@ -1328,7 +1328,7 @@ def test_elb_with_reserved_chars(self): "DNSName": GetAtt(ELBdevdockerregistryservice, "DNSName") }, - "Name": "dev_docker-registry.service.kyrtest.foo.bar." + "Name": "dev_docker-registry.service-name.kyrtest.foo.bar." 
} ], ) From 5985e79671cbccbe76107f10c7a497cd1efbe051 Mon Sep 17 00:00:00 2001 From: yufangzhang Date: Fri, 15 Jul 2016 18:04:16 +0100 Subject: [PATCH 4/8] Add support for RDS instance --- bootstrap_cfn/config.py | 6 ++++++ bootstrap_cfn/fab_tasks.py | 39 +++++--------------------------------- bootstrap_cfn/r53.py | 24 +++++++++++++++++++++++ tests/tests.py | 8 ++++---- 4 files changed, 39 insertions(+), 38 deletions(-) diff --git a/bootstrap_cfn/config.py b/bootstrap_cfn/config.py index 07bb389..82bc841 100644 --- a/bootstrap_cfn/config.py +++ b/bootstrap_cfn/config.py @@ -551,6 +551,12 @@ def rds(self, template): if 'db-engine' in self.data['rds'] and self.data['rds']['db-engine'].startswith("sqlserver"): required_fields.pop('db-name') + if 'identifier' in self.data['rds']: + # update identifier name + self.data['rds']['identifier'] = "{}-{}".format(self.data['rds']['identifier'], self.stack_id) + logging.info("identifier was updated to {}".format(self.data['rds']['identifier'])) + print "identifier was updated to {}".format(self.data['rds']['identifier']) + # TEST FOR REQUIRED FIELDS AND EXIT IF MISSING ANY for yaml_key, rds_prop in required_fields.iteritems(): if yaml_key not in self.data['rds']: diff --git a/bootstrap_cfn/fab_tasks.py b/bootstrap_cfn/fab_tasks.py index 721d0c4..05bc11a 100755 --- a/bootstrap_cfn/fab_tasks.py +++ b/bootstrap_cfn/fab_tasks.py @@ -552,29 +552,17 @@ def cfn_delete(force=False, pre_delete_callbacks=None): stack_id = stack_name.split('-')[-1] zone_name = get_zone_name() zone_id = r53_conn.get_hosted_zone_id(zone_name) + elb = get_first_public_elb() if hasattr(env, "tag") and env.tag != 'active': # delete inactive stack stack_tag = env.tag logger.info("Deleting {} inactive stack {}...".format(stack_tag, stack_name)) print green("\nSTACK {0} DELETING...\n").format(stack_name) - # delete Alias record - elb_name = "{}-{}".format(elb, stack_id) - alias_record_object = r53_conn.get_full_record(zone_name, zone_id, elb_name, 'A') - if alias_record_object: - alias_record_value = [alias_record_object.alias_hosted_zone_id, - alias_record_object.alias_dns_name, - alias_record_object.alias_evaluate_target_health] - alias_record_name = "{}.{}".format(elb_name, zone_name) - r53_conn.delete_dns_record(zone_id, alias_record_name, 'A', alias_record_value, is_alias=True) - # delete TXT record + # delete Alias and TXT records txt_tag_record = get_tag_record_name(stack_tag) - txt_record_name = "{}.{}".format(txt_tag_record, zone_name) - txt_record_value = '"{}"'.format(r53_conn.get_record( - zone_name, zone_id, txt_tag_record, 'TXT')) - if txt_record_value: - r53_conn.delete_dns_record(zone_id, txt_record_name, 'TXT', txt_record_value) + r53_conn.delete_record(zone_name, zone_id, elb, stack_id, stack_tag, txt_tag_record) # Wait for stacks to delete print 'Waiting for stack to delete.' 
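
The identifier suffixing in the config.py hunk above is a plain string append; its effect, with an illustrative stack id, is exactly what the updated tests below assert:

    rds = {'identifier': 'test-dev'}
    stack_id = '12345678'
    rds['identifier'] = "{}-{}".format(rds['identifier'], stack_id)
    assert rds['identifier'] == 'test-dev-12345678'
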
         cfn.delete(stack_name)
@@ -591,25 +579,8 @@ def cfn_delete(force=False, pre_delete_callbacks=None):
         stack_tag = 'active'
         print green("\nDELETING ACTIVE DNS RECORDS...\n")
-
-        # delete 'A' record
-        main_record_name = "{}.{}".format(elb, zone_name)
-        stack_record_name = "{}-{}".format(elb, stack_id)
-        stack_record_object = r53_conn.get_full_record(zone_name, zone_id, stack_record_name, 'A')
-        main_record_object = r53_conn.get_full_record(zone_name, zone_id, elb, 'A')
-        main_record_value = [main_record_object.alias_hosted_zone_id,
-                             main_record_object.alias_dns_name,
-                             main_record_object.alias_evaluate_target_health]
-        if stack_record_object and stack_record_object.to_print() == main_record_object.to_print():
-            r53_conn.delete_dns_record(zone_id, main_record_name, 'A', main_record_value, is_alias=True)
-
-        # delete 'TXT' record
-        tag_record_name = get_tag_record_name(stack_tag)
-        record_value = '"{}"'.format(r53_conn.get_record(
-            zone_name, zone_id, tag_record_name, 'TXT'))
-        record_name = '{}.{}'.format(tag_record_name, zone_name)
-        if stack_id and stack_id == record_value[1:-1]:
-            r53_conn.delete_dns_record(zone_id, record_name, 'TXT', record_value)
+        txt_tag_record = get_tag_record_name(stack_tag)
+        r53_conn.delete_record(zone_name, zone_id, elb, stack_id, stack_tag, txt_tag_record)
 
     if 'ssl' in cfn_config.data:
         iam = get_connection(IAM)
diff --git a/bootstrap_cfn/r53.py b/bootstrap_cfn/r53.py
index 27b2c67..9b5a292 100644
--- a/bootstrap_cfn/r53.py
+++ b/bootstrap_cfn/r53.py
@@ -98,6 +98,30 @@ def delete_dns_record(self, zone_id, record_name, record_type, record_value, is_
         res = changes.commit()
         return res
 
+    def delete_record(self, zone_name, zone_id, elb, stack_id, stack_tag, txt_tag_record):
+        """
+        Delete the stack-specific ELB alias record and the tag TXT record
+        """
+        elb_name = "{}-{}".format(elb, stack_id)
+        alias_record_object = self.get_full_record(zone_name, zone_id, elb_name, 'A')
+        stack_record_name = None
+        stack_record_object = None
+        if stack_tag == 'active':
+            # do matching before deleting active records
+            stack_record_name = "{}-{}".format(elb, stack_id)
+            stack_record_object = self.get_full_record(zone_name, zone_id, stack_record_name, 'A')
+
+        if alias_record_object:
+            alias_record_value = [alias_record_object.alias_hosted_zone_id,
+                                  alias_record_object.alias_dns_name,
+                                  alias_record_object.alias_evaluate_target_health]
+            alias_record_name = "{}.{}".format(elb_name, zone_name)
+            if stack_record_object and stack_record_object.to_print() == alias_record_object.to_print():
+                self.delete_dns_record(zone_id, alias_record_name, 'A', alias_record_value, is_alias=True)
+        # delete TXT record; get_record() returns None when the record is missing,
+        # so only quote and delete a value that actually exists ('"None"' is truthy)
+        txt_record_name = "{}.{}".format(txt_tag_record, zone_name)
+        txt_record_value = self.get_record(zone_name, zone_id, txt_tag_record, 'TXT')
+        if txt_record_value:
+            self.delete_dns_record(zone_id, txt_record_name, 'TXT', '"{}"'.format(txt_record_value))
+
     def get_record(self, zone_name, zone_id, record_name, record_type):
         """
 
diff --git a/tests/tests.py b/tests/tests.py
index 19bdc80..7ad3862 100755
--- a/tests/tests.py
+++ b/tests/tests.py
@@ -300,7 +300,7 @@ def test_rds(self):
         ProjectConfig(
             'tests/sample-project.yaml',
             'dev',
-            'tests/sample-project-passwords.yaml').config, 'my-stack-name')
+            'tests/sample-project-passwords.yaml').config, 'my-stack-name-12345678')
 
         db_sg = ec2.SecurityGroup('DatabaseSG')
         db_sg.VpcId = Ref('VPC')
@@ -356,7 +356,7 @@ def test_rds(self):
         # Identifier can be optionally be defined in the yaml template for compatibility.
         # We're only testing the case where it's defined. If left undefined AWS will
         # generate a random one.
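
With the record juggling consolidated into R53.delete_record(), cleanup becomes a single call. A usage sketch, assuming valid AWS credentials and illustrative profile, zone and stack values (the call performs real Route53 deletions):

    from bootstrap_cfn.r53 import R53

    r53_conn = R53('my-aws-profile')   # profile name is illustrative
    r53_conn.delete_record('dsd.io', 'Z123EXAMPLE', 'helloworld',
                           '12345678', 'active', 'stack.active.helloworld-dev')
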
- self.assertEquals(identifier, 'test-dev') + self.assertEquals(identifier, 'test-dev-12345678') rds_dict["RDSInstance"]["Properties"].pop("DBInstanceIdentifier") known = self._resources_to_dict(known) compare(known, rds_dict) @@ -384,7 +384,7 @@ def test_rds_with_vpc_dependencies(self): ProjectConfig( 'tests/sample-project.yaml', 'dev', - 'tests/sample-project-passwords.yaml').config, 'my-stack-name') + 'tests/sample-project-passwords.yaml').config, 'my-stack-name-12345678') # generate and add the VPCGatewayAttachment resource to the template # to ensure it is passed as an attachment (DependsOn) later vpc_resources_needed_for_rds = [ @@ -447,7 +447,7 @@ def test_rds_with_vpc_dependencies(self): # Identifier can be optionally be defined in the yaml template for compatibility. # We're only testing the case where it's defined. If left undefined AWS will # generate a random one. - self.assertEquals(identifier, 'test-dev') + self.assertEquals(identifier, 'test-dev-12345678') rds_dict["RDSInstance"]["Properties"].pop("DBInstanceIdentifier") # keep just the keys (rds) we want to compare, # we are done with the vpc so pop the vpc gw attachment From 29b17082644c2d78dfd07b587498c2a69ddc5937 Mon Sep 17 00:00:00 2001 From: yufangzhang Date: Wed, 20 Jul 2016 09:44:00 +0100 Subject: [PATCH 5/8] Refactor code for unittest --- bootstrap_cfn/fab_tasks.py | 24 ++++-------------------- bootstrap_cfn/r53.py | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/bootstrap_cfn/fab_tasks.py b/bootstrap_cfn/fab_tasks.py index 05bc11a..03891d1 100755 --- a/bootstrap_cfn/fab_tasks.py +++ b/bootstrap_cfn/fab_tasks.py @@ -397,25 +397,26 @@ def set_stack_name(): """ # create a stack id + r53_conn = get_connection(R53) + zone_name = get_zone_name() + zone_id = get_zone_id() stack_suffix = uuid.uuid4().__str__()[-8:] if hasattr(env, 'tag'): if env.tag == 'active': raise ActiveTagExistConflictError(stack_suffix) - elif hastag(env.tag): + elif r53_conn.hastag(zone_name, zone_id, get_tag_record_name(env.tag)): raise TagRecordExistConflictError(env.tag) else: stack_tag = env.tag else: stack_tag = stack_suffix env.tag = stack_tag - zone_id = get_zone_id() record = "{}.{}".format(get_tag_record_name(stack_tag), get_zone_name()) logger.info("fab_tasks::set_stack_name: " "Creating stack suffix {} " "for record '{}' " "in zone id '{}'...".format(stack_suffix, record, zone_id)) # Let DNS update DNSServerError propogate - r53_conn = get_connection(R53) try: r53_conn.update_dns_record(zone_id, record, 'TXT', '"{0}"'.format(stack_suffix)) env.stack_name = "{0}-{1}".format(get_legacy_name(), stack_suffix) @@ -424,22 +425,6 @@ def set_stack_name(): return env.stack_name -def hastag(stack_tag): - """ - Check if stack_tag is in use - Args: - stack_tag: the tag of stack - Returns: - String if stack exists - None if not. 
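
hastag() simply proxies get_record() for the tag's TXT record, so truthiness is the contract: it returns the stored stack suffix when the tag is taken and None when it is free. A sketch, assuming an R53 connection r53_conn and illustrative zone values:

    taken_by = r53_conn.hastag('dsd.io', 'Z123EXAMPLE', 'stack.blue.helloworld-dev')
    if taken_by:
        print("tag already used by stack suffix {}".format(taken_by))
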
- """ - r53_conn = get_connection(R53) - zone_id = get_zone_id() - record_name = get_tag_record_name(stack_tag) - hasrecord = r53_conn.get_record(get_zone_name(), zone_id, record_name, 'TXT') - return hasrecord - - def get_zone_name(): zone_name = get_basic_config().get('master_zone', None) if not zone_name: @@ -552,7 +537,6 @@ def cfn_delete(force=False, pre_delete_callbacks=None): stack_id = stack_name.split('-')[-1] zone_name = get_zone_name() zone_id = r53_conn.get_hosted_zone_id(zone_name) - elb = get_first_public_elb() if hasattr(env, "tag") and env.tag != 'active': # delete inactive stack stack_tag = env.tag diff --git a/bootstrap_cfn/r53.py b/bootstrap_cfn/r53.py index 27b2c67..9b5a292 100644 --- a/bootstrap_cfn/r53.py +++ b/bootstrap_cfn/r53.py @@ -162,3 +162,17 @@ def get_full_record(self, zone_name, zone_id, record_name, record_type): if rr.type == record_type and rr.name == record_fqdn: return rr return None + + def hastag(self, zone_name, zone_id, record_name): + """ + Check if stack_tag is in use + Args: + zone_name: + zone_id: + record_name: + Returns: + String if stack exists + None if not. + """ + hasrecord = self.get_record(zone_name, zone_id, record_name, 'TXT') + return hasrecord From 7d5479e7859cde7d69d7c3841a7789d5d65072bb Mon Sep 17 00:00:00 2001 From: yufangzhang Date: Wed, 20 Jul 2016 10:05:42 +0100 Subject: [PATCH 6/8] Add unittest for multiple stacks --- bootstrap_cfn/fab_tasks.py | 14 +- tests/test_fab_tasks.py | 350 ++++++++++++++++++++++++++++++++++++- tests/test_r53.py | 42 +++++ 3 files changed, 399 insertions(+), 7 deletions(-) diff --git a/bootstrap_cfn/fab_tasks.py b/bootstrap_cfn/fab_tasks.py index 03891d1..236c197 100755 --- a/bootstrap_cfn/fab_tasks.py +++ b/bootstrap_cfn/fab_tasks.py @@ -7,8 +7,6 @@ import boto3 -import dns.resolver - from fabric.api import env, task from fabric.colors import green, red from fabric.utils import abort @@ -361,21 +359,23 @@ def get_stack_name(new=False): # know it yet... env.stack_name = 'temp' zone_name = get_zone_name() + zone_id = get_zone_id() if not zone_name: raise CfnConfigError("No master_zone in yaml, unable to create/find DNS records for stack name") logger.info("fab_tasks::get_stack_name: Found master zone '{}' in config...".format(zone_name)) # get record name in the format of: stack.[stack_tag].[app]-[env] record_name = get_tag_record_name(stack_tag) dns_name = "{}.{}".format(record_name, zone_name) + r53_conn = get_connection(R53) try: # get stack id - stack_suffix = dns.resolver.query(dns_name, 'TXT')[0].to_text().replace('"', "") + stack_suffix = r53_conn.get_record(zone_name, zone_id, record_name, 'TXT').replace('"', "") logger.info("fab_tasks::get_stack_name: Found stack suffix '{}' " "for dns record '{}'... 
".format(stack_suffix, dns_name)) legacy_name = get_legacy_name() env.stack_name = "{0}-{1}".format(legacy_name, stack_suffix) logger.info("fab_tasks::get_stack_name: Found stack name '{}'...".format(env.stack_name)) - except dns.resolver.NXDOMAIN: + except: raise DNSRecordNotFoundError(dns_name) return env.stack_name @@ -411,7 +411,7 @@ def set_stack_name(): else: stack_tag = stack_suffix env.tag = stack_tag - record = "{}.{}".format(get_tag_record_name(stack_tag), get_zone_name()) + record = "{}.{}".format(get_tag_record_name(stack_tag), zone_name) logger.info("fab_tasks::set_stack_name: " "Creating stack suffix {} " "for record '{}' " @@ -536,7 +536,7 @@ def cfn_delete(force=False, pre_delete_callbacks=None): elb = get_first_public_elb() stack_id = stack_name.split('-')[-1] zone_name = get_zone_name() - zone_id = r53_conn.get_hosted_zone_id(zone_name) + zone_id = get_zone_id() if hasattr(env, "tag") and env.tag != 'active': # delete inactive stack stack_tag = env.tag @@ -558,6 +558,7 @@ def cfn_delete(force=False, pre_delete_callbacks=None): print green("Stack successfully deleted") else: print red("Stack deletion was unsuccessful") + return False else: # delete active dns records @@ -569,6 +570,7 @@ def cfn_delete(force=False, pre_delete_callbacks=None): if 'ssl' in cfn_config.data: iam = get_connection(IAM) iam.delete_ssl_certificate(cfn_config.ssl(), stack_name) + return True @task diff --git a/tests/test_fab_tasks.py b/tests/test_fab_tasks.py index bb6339a..d9338ea 100644 --- a/tests/test_fab_tasks.py +++ b/tests/test_fab_tasks.py @@ -1,18 +1,366 @@ import unittest -from bootstrap_cfn import fab_tasks # noqa +import boto + +import yaml + from mock import patch, Mock # noqa +from bootstrap_cfn import cloudformation, config, errors, fab_tasks, iam, r53 + fake_profile = {'lol': {'aws_access_key_id': 'secretz', 'aws_secret_access_key': 'verysecretz'}} +def set_up_basic_config(): + ''' + Returns: a config.yaml test example + + ''' + basic_config = {'master_zone': 'dsd.io', + 'ec2': {}, + 'elb': [{'hosted_zone': 'unittest.dsd.io.', + 'name': 'unittest', + 'scheme': 'internet-facing'}], + 'rds': {}, + 's3': {}} + return yaml.dump(basic_config) + + class TestFabTasks(unittest.TestCase): def test_loaded(self): # Not a great test, but it at least checks for syntax erros in the file pass + def cfn_mock(self): + cf_mock = Mock() + cf_connect_result = Mock(name='cf_connect') + cf_mock.return_value = cf_connect_result + my_stack_name = "unittest-dev" + example_return = {'DeleteStackResponse': {'ResponseMetadata': {'RequestId': 'someuuid'}}} + stack_mock = Mock(stack_name=my_stack_name) + stack_mock.resource_status = 'CREATE_COMPLETE' + mock_config = {'delete_stack.return_value': example_return, + 'create_stack.return_value': my_stack_name, + 'describe_stacks.return_value': [stack_mock]} + cf_connect_result.configure_mock(**mock_config) + boto.cloudformation.connect_to_region = cf_mock + cf = cloudformation.Cloudformation("profile_name") + return Mock(return_value=cf) + + def iam_mock(self): + iam_mock = Mock() + iam_connect_result = Mock(name='iam_connect') + iam_mock.return_value = iam_connect_result + mock_config = {'delete_ssl_certificate.return_value': True} + iam_connect_result.configure_mock(**mock_config) + boto.iam.connect_to_region = iam_mock + i = iam.IAM("profile_name") + return Mock(return_value=i) + + def r53_mock(self): + ''' + Mock route53 connection and dsn records + Returns: + R53 Mock object + ''' + r53_mock = Mock() + r53_connect_result = Mock(name='r53_connect') + 
r53_mock.return_value = r53_connect_result + m1 = Mock(alias_dns_name="unittest1") + m1.name = 'unittest_elb-12345678.dsd.io.' + m1.type = 'A' + m1.alias_hosted_zone_id = "ASDAKSLSA" + m1.alias_evaluate_target_health = False + m2 = Mock(resource_records=['"12345678"']) + m2.name = 'stack.active.unittest-dev.dsd.io.' + m2.type = 'TXT' + m2.alias_hosted_zone_id = "ASDAKSLSA" + m2.alias_evaluate_target_health = False + m3 = Mock(alias_dns_name="unittest1") + m3.name = 'unittest_elb.dsd.io.' + m3.type = 'A' + m3.alias_hosted_zone_id = "ASDAKSLSA" + m3.alias_evaluate_target_health = False + m4 = Mock(resource_records=['"12345678"']) + m4.name = 'stack.test.unittest-dev.dsd.io.' + m4.type = 'TXT' + m4.alias_hosted_zone_id = "ASDAKSLSA" + m4.alias_evaluate_target_health = False + response = [m1, m2, m3, m4] + mock_config = {'update_dns_record.return_value': response, + 'get_all_rrsets.return_value': response, + 'delete_dns_record.return_value': response} + r53_connect_result.configure_mock(**mock_config) + boto.route53.connect_to_region = r53_mock + r = r53.R53("profile_name") + return Mock(return_value=r) + + def connection_side_effect(self, klass): + ''' + Returns r53/cfn/iam mock for get_connection(klass) + depending on different klass + ''' + if klass.__name__ == r53.R53.__name__: + return self.r53_mock() + elif klass.__name__ == cloudformation.Cloudformation.__name__: + return self.cfn_mock() + elif klass.__name__ == iam.IAM.__name__: + return self.iam_mock() + @patch('botocore.session.Session.get_scoped_config') def test_aws_task(self, mock_botocore): mock_botocore.return_value = fake_profile['lol'] fab_tasks.aws('nonexistent_profile') + + @patch('bootstrap_cfn.fab_tasks.get_config') + def test_get_all_elbs(self, get_config_function): + ''' + Check if get_all_elbs() returns all internet facing elbs. 
+        Args:
+            get_config_function: mock of get_config() function
+        '''
+        basic_config_mock = yaml.load(set_up_basic_config())
+        get_config_function.return_value = config.ConfigParser(
+            basic_config_mock, "unittest_stack_name", "dev", "test")
+
+        all_elbs = fab_tasks.get_all_elbs()
+        self.assertEqual(all_elbs, ["unittest"])
+
+    @patch('bootstrap_cfn.fab_tasks.get_all_elbs', return_value=["unittest_elb"])
+    def test_get_first_public_elb(self, get_all_elbs_function):
+        '''
+        Check if get_first_public_elb() returns the first internet-facing elb
+        Args:
+            get_all_elbs_function: mock of get_all_elbs(), returning a list of elbs
+        '''
+        first_elb = fab_tasks.get_first_public_elb()
+        self.assertEqual(first_elb, "unittest_elb")
+
+    @patch('bootstrap_cfn.fab_tasks.get_all_elbs', return_value=[])
+    def test_no_public_elb(self, get_all_elbs_function):
+        '''
+        Check that an exception is raised when no elbs are found
+        Args:
+            get_all_elbs_function: mock of get_all_elbs(), returning a list of elbs
+        '''
+        with self.assertRaises(errors.PublicELBNotFoundError):
+            fab_tasks.get_first_public_elb()
+
+    @patch('bootstrap_cfn.fab_tasks.get_zone_name', return_value="dsd.io")
+    @patch('bootstrap_cfn.fab_tasks.get_legacy_name', return_value="unittest-dev")
+    @patch('bootstrap_cfn.fab_tasks.get_zone_id', return_value="ASDAKSLDK")
+    @patch('bootstrap_cfn.fab_tasks.get_first_public_elb', return_value="unittest_elb")
+    def test_get_active_stack(self, get_first_public_elb_function,
+                              get_zone_id_function,
+                              get_legacy_name_function,
+                              get_zone_name_function):
+        '''
+        Check that get_active_stack() returns the stack_id of the
+        m2 record defined in r53_mock()
+        Args:
+            get_first_public_elb_function: mock of get_first_public_elb()
+            get_zone_id_function: mock of get_zone_id()
+            get_legacy_name_function: mock of get_legacy_name()
+            get_zone_name_function: mock of get_zone_name()
+        '''
+        fab_tasks.get_connection = self.r53_mock()
+
+        res = fab_tasks.get_active_stack()
+        self.assertTrue(res)
+        self.assertEqual(res, "12345678")
+
+    @patch('bootstrap_cfn.fab_tasks.get_zone_name', return_value="dsd.io")
+    @patch('bootstrap_cfn.fab_tasks.get_legacy_name', return_value="unittest-dev")
+    @patch('bootstrap_cfn.fab_tasks.get_zone_id', return_value="ASDAKSLDK")
+    @patch('bootstrap_cfn.fab_tasks.get_first_public_elb', return_value="unittest_elb")
+    def test_set_active_stack(self, get_first_public_elb_function,
+                              get_zone_id_function,
+                              get_legacy_name_function,
+                              get_zone_name_function):
+        '''
+        Set the stack tagged "test" as the active stack,
+        using the m4 record defined in r53_mock()
+        Args:
+            get_first_public_elb_function: mock of get_first_public_elb()
+            get_zone_id_function: mock of get_zone_id()
+            get_legacy_name_function: mock of get_legacy_name()
+            get_zone_name_function: mock of get_zone_name()
+        '''
+        fab_tasks.get_connection = self.r53_mock()
+        ret = fab_tasks.set_active_stack("test", force=True)
+        self.assertTrue(ret)
+
+    @patch('bootstrap_cfn.fab_tasks.get_connection')
+    @patch('bootstrap_cfn.fab_tasks.get_config')
+    @patch('bootstrap_cfn.fab_tasks.get_zone_name', return_value="dsd.io")
+    @patch('bootstrap_cfn.fab_tasks.get_legacy_name', return_value="unittest-dev")
+    @patch('bootstrap_cfn.fab_tasks.get_zone_id', return_value="ASDAKSLDK")
+    @patch('bootstrap_cfn.fab_tasks.get_first_public_elb', return_value="unittest_elb")
+    @patch('bootstrap_cfn.utils.tail')
+    @patch('bootstrap_cfn.fab_tasks.get_stack_name', return_value="stack.active.unittest-dev.dsd.io")
+    def test_cfn_delete_active_records(self, get_stack_name_function,
+                                       tail_function,
+                                       get_first_public_elb_function,
+                                       get_zone_id_function,
+                                       get_legacy_name_function,
+                                       get_zone_name_function,
+                                       get_config_function,
+                                       get_connection_function):
+        '''
+        Delete active dns records
+        Do not delete stack
+        Args:
+            get_stack_name_function: mock of get_stack_name()
+            tail_function: mock of utils.tail()
+            get_first_public_elb_function: mock of get_first_public_elb()
+            get_zone_id_function: mock of get_zone_id()
+            get_legacy_name_function: mock of get_legacy_name()
+            get_zone_name_function: mock of get_zone_name()
+            get_config_function: mock of get_config()
+            get_connection_function: mock of get_connection()
+        '''
+        get_connection_function.side_effect = self.connection_side_effect
+        basic_config_mock = yaml.load(set_up_basic_config())
+        get_config_function.return_value = config.ConfigParser(
+            basic_config_mock, "unittest_stack_name", "dev", "test")
+        tail_function.side_effect = self.tail_logs
+        ret = fab_tasks.cfn_delete(force=True)
+        self.assertTrue(ret)
+
+    def tail_logs(self, e):
+        return "stack successfully deleted"
+
+    @patch('bootstrap_cfn.config.ConfigParser.process', return_value="test")
+    @patch('bootstrap_cfn.fab_tasks.get_cloudformation_tags', return_value="test")
+    @patch('bootstrap_cfn.fab_tasks.get_connection')
+    @patch('bootstrap_cfn.fab_tasks.get_config')
+    @patch('bootstrap_cfn.fab_tasks.get_zone_name', return_value="dsd.io")
+    @patch('bootstrap_cfn.fab_tasks.get_legacy_name', return_value="unittest-dev")
+    @patch('bootstrap_cfn.fab_tasks.get_zone_id', return_value="ASDAKSLDK")
+    @patch('bootstrap_cfn.fab_tasks.get_first_public_elb', return_value="unittest_elb")
+    @patch('bootstrap_cfn.utils.tail')
+    @patch('bootstrap_cfn.fab_tasks.get_stack_name', return_value="stack.active.unittest-dev.dsd.io")
+    def test_cfn_create_without_ssl(self, get_stack_name_function,
+                                    tail_function,
+                                    get_first_public_elb_function,
+                                    get_zone_id_function,
+                                    get_legacy_name_function,
+                                    get_zone_name_function,
+                                    get_config_function,
+                                    get_connection_function,
+                                    get_cloudformation_tags,
+                                    process_function):
+        '''import pdb; pdb.set_trace()
+        get_connection_function.side_effect = self.connection_side_effect
+        basic_config_mock = yaml.load(set_up_basic_config())
+        get_config_function.return_value = config.ConfigParser(
+            basic_config_mock, "unittest_stack_name", "dev", "test")
+        tail_function.side_effect = self.tail_logs
+        ret = fab_tasks.cfn_create(False)
+        self.assertTrue(ret)'''
+
+    @patch('bootstrap_cfn.fab_tasks.get_legacy_name', return_value="unittest-dev")
+    def test_get_tag_record_name(self, get_legacy_name_function):
+        '''
+        Check that the tagged record name is returned
+        Args:
+            get_legacy_name_function: mock of get_legacy_name(),
+                "[application]-[environment]"
+        '''
+        record_name = fab_tasks.get_tag_record_name("test")
+        self.assertEqual(record_name, "stack.test.unittest-dev")
+
+    @patch('bootstrap_cfn.fab_tasks.get_zone_name', return_value="dsd.io")
+    @patch('bootstrap_cfn.fab_tasks.get_legacy_name', return_value="unittest-dev")
+    @patch('bootstrap_cfn.fab_tasks.get_zone_id', return_value="ASDAKSLDK")
+    @patch('bootstrap_cfn.fab_tasks.get_first_public_elb', return_value="unittest_elb")
+    def test_get_stack_name(self, get_first_public_elb_function,
+                            get_zone_id_function,
+                            get_legacy_name_function,
+                            get_zone_name_function):
+        '''
+        Test that the correct stack name is returned
+        Args:
+            get_first_public_elb_function: mock of get_first_public_elb()
+            get_zone_id_function: mock of get_zone_id()
+            get_legacy_name_function: mock of get_legacy_name(): [application]-[environment]
+            get_zone_name_function: mock of get_zone_name()
+        '''
+        fab_tasks.get_connection = self.r53_mock()
+        stack_name = fab_tasks.get_stack_name(False)
+        self.assertTrue(stack_name)
+        self.assertEqual(stack_name, "unittest-dev-12345678")
+
+    @patch('bootstrap_cfn.fab_tasks.get_zone_name', return_value="dsd.io")
+    @patch('bootstrap_cfn.fab_tasks.get_legacy_name', return_value="unittest-dev")
return_value="unittest-dev") + @patch('bootstrap_cfn.fab_tasks.get_zone_id', return_value="ASDAKSLDK") + @patch('bootstrap_cfn.fab_tasks.get_first_public_elb', return_value="unittest_elb") + def test_set_stack_name(self, get_first_public_elb_function, + get_zone_id_function, + get_legacy_name_function, + get_zone_name_function): + ''' + Test set_stack_name + Args: + get_first_public_elb_function: get_first_public_elb() + get_zone_id_function: get_zone_id() + get_legacy_name_function: get_legacy_name(): [application-environment] + get_zone_name_function: get_zone_name() + + Returns: + + ''' + fab_tasks.get_connection = self.r53_mock() + + stack_tag_mock = Mock(return_value="test") + fab_tasks.env.tag = stack_tag_mock + stack_name = fab_tasks.set_stack_name() + self.assertTrue(stack_name) + + @patch('bootstrap_cfn.fab_tasks.get_basic_config') + def test_get_zone_name(self, get_basic_config_function): + ''' + Check if it returns the right zone name + + ''' + get_basic_config_function.return_value = yaml.load(set_up_basic_config()) + zone_name = fab_tasks.get_zone_name() + self.assertEqual(zone_name, "dsd.io") + + @patch('bootstrap_cfn.fab_tasks.get_zone_name', return_value="dsd.io") + def test_get_zone_id(self, get_zone_name_function): + ''' + Check if it returns right zone id + Args: + get_zone_name_function: mock of get_zone_name + ''' + # mock r53 + r53_mock = Mock() + r53_connect_result = Mock(name='cf_connect') + r53_mock.return_value = r53_connect_result + response = {'GetHostedZoneResponse': { + "HostedZone": { + "Id": "/hostedzone/Z1GDM6HEODZI69" + } + }} + # get_hosted_zone_by_name is within get_hosted_zone_id() + mock_config = {'get_hosted_zone_by_name.return_value': response} + r53_connect_result.configure_mock(**mock_config) + boto.route53.connect_to_region = r53_mock + r = r53.R53("profile_name") + fab_tasks.get_connection = Mock(return_value=r) + + zone_id = fab_tasks.get_zone_id() + self.assertEqual(zone_id, "Z1GDM6HEODZI69") diff --git a/tests/test_r53.py b/tests/test_r53.py index d9bf7d7..c71f510 100644 --- a/tests/test_r53.py +++ b/tests/test_r53.py @@ -29,6 +29,15 @@ def test_update_dns_record(self): x = r.update_dns_record('blah/blah', 'x.y', 'A', '1.1.1.1') self.assertTrue(x) + def test_delete_dns_record(self): + r53_mock = mock.Mock() + r53_connect_result = mock.Mock(name='cf_connect') + r53_mock.return_value = r53_connect_result + boto.route53.connect_to_region = r53_mock + r = r53.R53(self.env.aws_profile) + x = r.delete_dns_record('blah/blah', 'x.y', 'A', '1.1.1.1') + self.assertTrue(x) + def test_get_hosted_zone_id(self): r53_mock = mock.Mock() r53_connect_result = mock.Mock(name='cf_connect') @@ -60,6 +69,23 @@ def test_get_record(self): x = r.get_record('dsd.io', 'ASDAKSLDK', 'recordname', 'A') self.assertEquals(x, 'dnsname') + def test_get_full_record(self): + record_fqdn = "recordname" + r53_mock = mock.Mock() + r53_connect_result = mock.Mock(name='cf_connect') + r53_mock.return_value = r53_connect_result + m = mock.Mock(record="unittest") + m.name = 'recordname.dsd.io.' 
+        m.type = 'A'
+        response = [m]
+        mock_config = {'get_all_rrsets.return_value': response}
+        r53_connect_result.configure_mock(**mock_config)
+        boto.route53.connect_to_region = r53_mock
+        r = r53.R53(self.env.aws_profile)
+
+        rrsets = r.get_full_record('dsd.io', 'ASDAKSLDK', record_fqdn, 'A')
+        self.assertEqual(rrsets.record, m.record)
+
     def test_get_TXT_record(self):
         r53_mock = mock.Mock()
         r53_connect_result = mock.Mock(name='cf_connect')
@@ -75,3 +101,19 @@ def test_get_TXT_record(self):
         r = r53.R53(self.env.aws_profile)
         x = r.get_record('dsd.io', 'ASDAKSLDK', 'blah', 'TXT')
         self.assertEquals(x, 'lollol')
+
+    def test_hastag(self):
+        r53_mock = mock.Mock()
+        r53_connect_result = mock.Mock(name='cf_connect')
+        r53_mock.return_value = r53_connect_result
+
+        m = mock.Mock(resource_records=['"lollol"'])
+        m.name = 'recordname.dsd.io.'
+        m.type = 'TXT'
+        response = [m]
+        mock_config = {'get_all_rrsets.return_value': response}
+        r53_connect_result.configure_mock(**mock_config)
+        boto.route53.connect_to_region = r53_mock
+        r = r53.R53(self.env.aws_profile)
+        x = r.hastag("dsd.io", "ASDAKSLDK", "recordname")
+        self.assertTrue(x)

From 0c19619828d3cd8530cb7c5ca5c0d543d9ec4205 Mon Sep 17 00:00:00 2001
From: yufangzhang
Date: Tue, 26 Jul 2016 17:53:54 +0100
Subject: [PATCH 7/8] Add test case for deleting inactive stack

---
 bootstrap_cfn/fab_tasks.py | 16 +++++++++----
 tests/test_fab_tasks.py    | 43 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 53 insertions(+), 6 deletions(-)

diff --git a/bootstrap_cfn/fab_tasks.py b/bootstrap_cfn/fab_tasks.py
index 236c197..c31175f 100755
--- a/bootstrap_cfn/fab_tasks.py
+++ b/bootstrap_cfn/fab_tasks.py
@@ -350,7 +350,7 @@ def get_stack_name(new=False):
         set_stack_name()
     if hasattr(env, 'tag'):
-        stack_tag = env.tag
+        stack_tag = get_env_tag()
     else:
         stack_tag = 'active'
     env.tag = stack_tag
@@ -407,7 +407,7 @@ def set_stack_name():
         elif r53_conn.hastag(zone_name, zone_id, get_tag_record_name(env.tag)):
             raise TagRecordExistConflictError(env.tag)
         else:
-            stack_tag = env.tag
+            stack_tag = get_env_tag()
     else:
         stack_tag = stack_suffix
     env.tag = stack_tag
@@ -537,15 +537,14 @@ def cfn_delete(force=False, pre_delete_callbacks=None):
     stack_id = stack_name.split('-')[-1]
     zone_name = get_zone_name()
     zone_id = get_zone_id()
-    if hasattr(env, "tag") and env.tag != 'active':
+    if not isactive():
         # delete inactive stack
-        stack_tag = env.tag
+        stack_tag = get_env_tag()
         logger.info("Deleting {} inactive stack {}...".format(stack_tag, stack_name))
         print green("\nSTACK {0} DELETING...\n").format(stack_name)
         # delete Alias and TXT records
         txt_tag_record = get_tag_record_name(stack_tag)
-        r53_conn.delete_record(zone_name, zone_id, elb, stack_id, stack_tag, txt_tag_record)
         # Wait for stacks to delete
         print 'Waiting for stack to delete.'
@@ -573,6 +572,13 @@ def cfn_delete(force=False, pre_delete_callbacks=None):
     return True


+def get_env_tag():
+    return env.tag
+
+def isactive():
+    return not hasattr(env, "tag") or env.tag == 'active'
+
+
 @task
 def cfn_create(test=False):
     """
diff --git a/tests/test_fab_tasks.py b/tests/test_fab_tasks.py
index d9338ea..8cda6a6 100644
--- a/tests/test_fab_tasks.py
+++ b/tests/test_fab_tasks.py
@@ -201,6 +201,7 @@ def test_set_active_stack(self, get_first_public_elb_function,
         ret = fab_tasks.set_active_stack("test", force=True)
         self.assertTrue(ret)

+    @patch('bootstrap_cfn.fab_tasks.isactive', return_value=True)
     @patch('bootstrap_cfn.fab_tasks.get_connection')
     @patch('bootstrap_cfn.fab_tasks.get_config')
     @patch('bootstrap_cfn.fab_tasks.get_zone_name', return_value="dsd.io")
@@ -216,7 +217,8 @@ def test_cfn_delete_active_records(self, get_stack_name_function,
                                        get_legacy_name_function,
                                        get_zone_name_function,
                                        get_config_function,
-                                       get_connection_function):
+                                       get_connection_function,
+                                       isactive_function):
         '''
         Delete active dns records
         Do not delete stack
@@ -235,6 +237,45 @@ def test_cfn_delete_active_records(self, get_stack_name_function,
         ret = fab_tasks.cfn_delete(force=True)
         self.assertTrue(ret)

+    @patch('bootstrap_cfn.fab_tasks.get_env_tag', return_value='test')
+    @patch('bootstrap_cfn.fab_tasks.isactive', return_value=False)
+    @patch('bootstrap_cfn.fab_tasks.get_connection')
+    @patch('bootstrap_cfn.fab_tasks.get_config')
+    @patch('bootstrap_cfn.fab_tasks.get_zone_name', return_value="dsd.io")
+    @patch('bootstrap_cfn.fab_tasks.get_legacy_name', return_value="unittest-dev")
+    @patch('bootstrap_cfn.fab_tasks.get_zone_id', return_value="ASDAKSLDK")
+    @patch('bootstrap_cfn.fab_tasks.get_first_public_elb', return_value="unittest_elb")
+    @patch('bootstrap_cfn.utils.tail')
+    @patch('bootstrap_cfn.fab_tasks.get_stack_name', return_value="stack.test.unittest-dev.dsd.io")
+    def test_cfn_delete_inactive_stack(self, get_stack_name_function,
+                                       tail_function,
+                                       get_first_public_elb_function,
+                                       get_zone_id_function,
+                                       get_legacy_name_function,
+                                       get_zone_name_function,
+                                       get_config_function,
+                                       get_connection_function,
+                                       isactive_function,
+                                       get_env_tag_function):
+        '''
+        Delete an inactive stack
+        Args:
+            get_stack_name_function: mock of get_stack_name()
+            tail_function: mock of utils.tail()
+            get_first_public_elb_function: mock of get_first_public_elb()
+            get_zone_id_function: mock of get_zone_id()
+            get_legacy_name_function: mock of get_legacy_name()
+            get_zone_name_function: mock of get_zone_name()
+            get_config_function: mock of get_config()
+            get_connection_function: mock of get_connection()
+            isactive_function: mock of isactive()
+            get_env_tag_function: mock of get_env_tag()
+        '''
+        get_connection_function.side_effect = self.connection_side_effect
+        basic_config_mock = yaml.load(set_up_basic_config())
+        get_config_function.return_value = config.ConfigParser(
+            basic_config_mock, "unittest_stack_name", "dev", "test")
+        tail_function.side_effect = self.tail_logs
+        import pdb; pdb.set_trace()
+        ret = fab_tasks.cfn_delete(force=True)
+        self.assertTrue(ret)
+        import pdb; pdb.set_trace()
+
     def tail_logs(self, e):
         return "stack successfully deleted"

From b3bc5430d27166b1a1a6ba90074aa4a44ec790f1 Mon Sep 17 00:00:00 2001
From: yufangzhang
Date: Tue, 26 Jul 2016 18:02:17 +0100
Subject: [PATCH 8/8] fixup

---
 bootstrap_cfn/fab_tasks.py |  1 +
 tests/test_fab_tasks.py    | 13 ++++++++-----
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/bootstrap_cfn/fab_tasks.py b/bootstrap_cfn/fab_tasks.py
index c31175f..f21d4e4 100755
--- a/bootstrap_cfn/fab_tasks.py
+++ b/bootstrap_cfn/fab_tasks.py
@@ -575,6 +575,7 @@ def cfn_delete(force=False, pre_delete_callbacks=None):
 def get_env_tag():
     return env.tag

+
 def isactive():
     return not hasattr(env, "tag") or env.tag == 'active'

diff --git a/tests/test_fab_tasks.py b/tests/test_fab_tasks.py
index 8cda6a6..abd16e4 100644
--- a/tests/test_fab_tasks.py
+++ b/tests/test_fab_tasks.py
@@ -271,13 +271,11 @@ def test_cfn_delete_inactive_stack(self, get_stack_name_function,
         get_config_function.return_value = config.ConfigParser(
             basic_config_mock, "unittest_stack_name", "dev", "test")
         tail_function.side_effect = self.tail_logs
-        import pdb; pdb.set_trace()
         ret = fab_tasks.cfn_delete(force=True)
         self.assertTrue(ret)
-        import pdb; pdb.set_trace()

     def tail_logs(self, e):
-        return "stack successfully deleted"
+        print "stack successfully deleted"

     @patch('bootstrap_cfn.config.ConfigParser.process', return_value="test")
     @patch('bootstrap_cfn.fab_tasks.get_cloudformation_tags', return_value="test")
@@ -299,14 +297,19 @@ def test_cfn_create_without_ssl(self, get_stack_name_function,
                                     get_connection_function,
                                     get_cloudformation_tags,
                                     process_function):
-        '''import pdb; pdb.set_trace()
+        ''' Disabled until the cfn mocks are extended:
+            cfn.create needs to return a stack and
+            cfn.get_last_stack_event() needs to return
+            a stack event.
+
         get_connection_function.side_effect = self.connection_side_effect
         basic_config_mock = yaml.load(set_up_basic_config())
         get_config_function.return_value = config.ConfigParser(
             basic_config_mock, "unittest_stack_name", "dev", "test")
         tail_function.side_effect = self.tail_logs
         ret = fab_tasks.cfn_create(False)
-        self.assertTrue(ret)'''
+        self.assertTrue(ret)
+        '''

     @patch('bootstrap_cfn.fab_tasks.get_legacy_name', return_value="unittest-dev")
     def test_get_tag_record_name(self, get_legacy_name_function):