Skip to content

Commit

Permalink
Merge d13d239 into cab04e8
Browse files Browse the repository at this point in the history
  • Loading branch information
adamjkeller committed Sep 14, 2015
2 parents cab04e8 + d13d239 commit aa808cc
Show file tree
Hide file tree
Showing 5 changed files with 372 additions and 16 deletions.
5 changes: 5 additions & 0 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,14 @@ python:

install:
- python setup.py install
- pip install coveralls

before_script:
- export PYTHONPATH=$PYTHONPATH:$PWD

script:
- coverage run --source License2Deploy setup.py test
- python setup.py test

after_success:
coveralls
184 changes: 184 additions & 0 deletions License2Deploy/rolling_deploy.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,184 @@
#!/usr/bin/python

import logging
from boto.ec2 import autoscale as a, connection as c, elb as e
from sys import exit, argv
from time import sleep

class RollingDeploy(object):
    '''Rolling deployment driver for an AWS autoscale group.

    Doubles the group's desired capacity so the new build spins up alongside
    the old instances, health-checks the new instances (EC2 status checks,
    then ELB health), and finally halves the capacity back down so the old
    instances are retired.
    '''

    def __init__(self, env=None, project=None, buildNum=None):
        '''
        :param env: deployment environment; 'qa' maps to us-west-1,
                    anything else to us-east-1.
        :param project: project name; dashes are stripped so it matches the
                        autoscale-group / ELB naming convention.
        :param buildNum: build number expected in the new instances'
                         BUILD tag.
        '''
        self.env = env
        self.project = project.replace('-', '')
        self.buildNum = buildNum
        self.region = self.determine_region(self.env)

    def aws_conn_auto(self):
        '''Return a boto autoscale connection for this deploy's region.'''
        return a.connect_to_region(self.region)

    def aws_conn_ec2(self):
        '''Return a boto EC2 connection (boto's default region).'''
        return c.EC2Connection()

    def aws_conn_elb(self):
        '''Return a boto ELB connection for this deploy's region.'''
        return e.connect_to_region(self.region)

    def determine_region(self, env):
        '''Map an environment name to its AWS region.'''
        if env == 'qa':
            return 'us-west-1'
        else:
            return 'us-east-1'

    def get_group_info(self, group_name=None):
        '''Return a list of autoscale group objects.

        :param group_name: specific group to look up; all groups when None.
        Exits(2) when the lookup fails or the named group does not exist.
        '''
        try:
            if group_name:
                rslt = self.aws_conn_auto().get_all_groups([group_name])
                if not rslt:
                    raise Exception("Bad Group: {0}".format(group_name))
                return rslt
            else:
                return self.aws_conn_auto().get_all_groups()
        # 'err', not 'e': the original shadowed the elb module alias 'e'.
        except Exception as err:
            logging.error("Unable to pull down autoscale group: {0}".format(err))
            exit(2)

    def get_autoscale_group_name(self):
        '''Search for project in autoscale groups and return autoscale group name.'''
        return next((group.name for group in self.get_group_info()
                     if group.name and self.project in group.name), None)

    def calculate_autoscale_desired_instance_count(self, group_name, desired_state):
        '''Return the group's desired capacity doubled ('increase') or
        halved ('decrease'); exits(2) on any other desired_state.

        The original relied on catching the UnboundLocalError raised when
        desired_state was misspelled; validate explicitly instead.
        '''
        cur_count = int(self.get_group_info(group_name)[0].desired_capacity)
        if desired_state == 'increase':
            new_count = self.double_autoscale_instance_count(cur_count)
        elif desired_state == 'decrease':
            new_count = self.decrease_autoscale_instance_count(cur_count)
        else:
            logging.error("Please make sure the desired_state is set to either increase or decrease: got {0}".format(desired_state))
            exit(2)
        logging.info("Current desired count was changed from {0} to {1}".format(cur_count, new_count))
        return new_count

    def double_autoscale_instance_count(self, count):
        '''Multiply current count by 2.'''
        return count * 2

    def decrease_autoscale_instance_count(self, count):
        '''Divide current count in half.  Floor division keeps the result
        an int on Python 3 as well as Python 2 ('/' would yield a float).'''
        return count // 2

    def set_autoscale_instance_desired_count(self, new_count, group_name):
        '''Set the group's desired capacity to new_count.

        :returns: True on success; exits(2) on failure.
        '''
        try:
            logging.info("Set autoscale capacity for {0} to {1}".format(group_name, new_count))
            self.aws_conn_auto().set_desired_capacity(group_name, new_count)
            return True
        except Exception as err:
            logging.error("Unable to update desired count, please investigate error: {0}".format(err))
            exit(2)

    def get_all_instance_ids(self, group_name):
        ''' Gather Instance id's of all instances in the autoscale group '''
        id_list = [inst.instance_id
                   for inst in self.get_group_info(group_name)[0].instances]
        logging.info("List of all Instance ID's in {0}: {1}".format(group_name, id_list))
        return id_list

    def get_instance_ids_by_requested_build_tag(self, id_list, build):
        '''Filter id_list down to the instances whose BUILD tag equals build.
        Exits(2) when nothing matches (the new build never came up).'''
        reservations = self.aws_conn_ec2().get_all_instances()
        # Index tagged instances once instead of rescanning every
        # reservation for each candidate id (the original was O(ids*insts)).
        build_tags = {inst.id: inst.tags['BUILD']
                      for r in reservations for inst in r.instances
                      if 'BUILD' in inst.tags}
        new_instances = [i for i in id_list if build_tags.get(i) == str(build)]
        if new_instances:
            logging.info("New Instance List: {0}".format(new_instances))
            return new_instances
        else:
            logging.error("New Instance List is empty, something went wrong")
            exit(2)

    def wait_for_new_instances(self, instance_ids, retry=9, wait_time=30):
        '''Monitor new instances that come up and wait until they are ready.

        Polls EC2 status checks until both the system status and the
        instance status report "ok"; exits(2) after retry failed polls of
        wait_time seconds each.
        '''
        for instance in instance_ids:
            count = 0
            health = []
            while count <= retry and len(health) < 2:
                status_list = self.aws_conn_ec2().get_all_instance_status(instance)
                for state in status_list:
                    # Both checks must read "ok" for the instance to count as healthy.
                    health = [x for x in [str(state.system_status.status), str(state.instance_status.status)] if x == "ok"]
                if len(health) < 2:
                    logging.warning("{0} is not in a fully working state yet".format(instance))
                    sleep(wait_time)
                    count += 1
                    if count > retry:
                        logging.error("{0} has not reached a valid healthy state".format(instance))
                        exit(2)
                else:
                    logging.info("{0} is in a healthy state. Moving on...".format(instance))

    def lb_healthcheck(self, new_ids, retry=10, wait_time=10):
        ''' Confirm that the healthchecks report back OK in the LB. '''
        try:
            # First load balancer whose name contains the project name.
            lb = next(n.name for n in self.aws_conn_elb().get_all_load_balancers() if self.project in str(n.name))
            for inst_idx in range(len(new_ids)):
                count = 0
                inst_state = self.aws_conn_elb().describe_instance_health(lb)[inst_idx]
                while inst_state.state != 'InService' and count < (retry + 1):
                    logging.warning("Load balancer healthcheck is returning {0} for {1}. Retrying after 10 seconds. Count == {2}".format(inst_state.state, inst_state.instance_id, count))
                    inst_state = self.aws_conn_elb().describe_instance_health(lb)[inst_idx]
                    count += 1
                    sleep(wait_time)
                    if inst_state.state != 'InService' and count >= retry:
                        logging.error("Load balancer healthcheck returning {0} for {1} and has exceeded the timeout threshold set. Please roll back.".format(inst_state.state, inst_state.instance_id))
                        exit(2)
                logging.info("ELB healthcheck OK == {0}: {1}".format(inst_state.instance_id, inst_state.state))
        except Exception as err:
            logging.error("Something went wrong during ELB health checks, please investigate:\n {0}".format(err))
            exit(2)
        return True

    def healthcheck_new_instances(self):
        ''' Healthchecking new instances to ensure deployment was successful '''
        group_name = self.get_autoscale_group_name()
        instance_ids = self.get_all_instance_ids(group_name)
        new_instance_ids = self.get_instance_ids_by_requested_build_tag(instance_ids, self.buildNum)
        self.wait_for_new_instances(new_instance_ids)  # Wait for new instances to be up and ready
        self.lb_healthcheck(new_instance_ids)  # Once instances are ready, healthcheck. If successful, decrease desired count.

    def deploy(self):
        ''' Rollin Rollin Rollin, Rawhide! '''
        group_name = self.get_autoscale_group_name()
        logging.info("Build #: {0} Autoscale Group: {1}".format(self.buildNum, group_name))
        self.set_autoscale_instance_desired_count(self.calculate_autoscale_desired_instance_count(group_name, 'increase'), group_name)
        logging.info("Sleeping for 240 seconds to allow for instances to spin up")
        sleep(240)  # Need to wait until the instances come up in the load balancer
        self.healthcheck_new_instances()
        self.set_autoscale_instance_desired_count(self.calculate_autoscale_desired_instance_count(group_name, 'decrease'), group_name)
        logging.info("Deployment Complete!")
logging.info("Deployment Complete!")

def check_args(args=None):
    '''Validate the command-line arguments: env, project, buildnumber.

    :param args: argument list to validate; defaults to sys.argv.  Added
                 (backward-compatibly) so the check is unit-testable.

    Exits(2) when fewer than three arguments follow the program name.
    '''
    args = argv if args is None else args
    # Three positional args after the program name => len must be >= 4.
    # The original checked `< 3`, which let a missing build number slip
    # through to an IndexError on argv[3] in main().
    if len(args) < 4:
        prog = args[0] if args else 'rolling_deploy'
        logging.error("Please enter three arguments: {0} {1} {2} {3}".format(prog, 'env', 'project', 'buildnumber'))
        exit(2)

def setup_logging():
    '''Configure root logging: timestamped, INFO level, and emit a start marker.'''
    log_format = '%(asctime)s: %(levelname)s: %(message)s'
    logging.basicConfig(format=log_format, level=logging.INFO)
    logging.info("Begin Logging...")

def main():
    '''Entry point: set up logging, validate argv, then run the rolling deploy.'''
    setup_logging()
    check_args()
    deployer = RollingDeploy(argv[1], argv[2], argv[3])
    deployer.deploy()

# Script entry point: only runs main() when executed directly, not on import.
if __name__ == "__main__":
    main()
3 changes: 3 additions & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,9 @@
]

tests_require = [
"mock",
"boto",
"moto"
]

def read(fname):
Expand Down
16 changes: 0 additions & 16 deletions tests/deploy_test.py

This file was deleted.

Loading

0 comments on commit aa808cc

Please sign in to comment.