Permalink
Cannot retrieve contributors at this time
Fetching contributors…
#!/usr/bin/env python3
'''Launch an instance on ec2 to test cloud-init'''
try:
    from argparse import ArgumentParser
except ImportError:
    raise RuntimeError(
        'Could not import argparse. Please install python-argparse '
        'package to continue')
try:
    import boto3
    from botocore.exceptions import ClientError, NoCredentialsError
except ImportError:
    # Was a one-line compound `except: raise` statement; split per PEP 8.
    raise RuntimeError(
        'Could not import boto3. Please install python3-boto3 '
        'package to continue')

from base64 import b64encode
import os
import re
import shutil
from subprocess import (
    CalledProcessError, DEVNULL, PIPE, STDOUT, Popen, call, check_call,
    check_output)
import sys
from time import sleep
# Default EC2 placement for the integration test instance.
DEFAULT_REGION = 'us-east-2'
DEFAULT_AVAILABILITY_ZONE = 'b'
# Name under which the local ssh public key is imported into EC2.
KEY_PAIR_NAME = 'cloud-init-integration'
# 'Name' tag value applied to the VPC, subnet, route table and secgroup.
DEFAULT_TAG = 'cloud-init-integration'
DEFAULT_SERIES = 'xenial'
SECGROUP_NAME = 'Cloud-init integration test secgroup'
SECGROUP_DESCRIPTION = 'Security group setting ssh ingress to instances'
# Directory where script artifacts can be saved for sync
DEFAULT_LOCAL_ARTIFACT_DIR = 'test-artifacts'
REMOTE_ARTIFACT_DIR = '/tmp/cloud-init-script-artifacts'
# Regions accepted by the --region command line option.
EC2_REGIONS = (
    'ap-northeast-1', 'ap-northeast-2', 'ap-south-1',
    'ap-southeast-1', 'ap-southeast-2', 'ca-central-1', 'eu-central-1',
    'eu-west-1', 'eu-west-2', 'sa-east-1', 'us-east-1', 'us-east-2',
    'us-west-1', 'us-west-2')
# Template of kwargs for ec2.create_instances; callers fill in ImageId,
# network interface Groups/SubnetId and Placement before launching.
# See also
# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances
CREATE_INSTANCE_DEFAULTS = {
    'NetworkInterfaces': [{'AssociatePublicIpAddress': True,
                           'DeviceIndex': 0}],
    'ImageId': 'ami-FIXME',  # Must be set by our request
    'KeyName': 'cloud-init-integration',
    'InstanceType': 't2.micro',
    'MaxCount': 1,  # Deploy 1 instance of this type
    'MinCount': 1,
    'Monitoring': { 'Enabled': False },
    'Placement': {
        'AvailabilityZone': '{}{}'.format(
            DEFAULT_REGION, DEFAULT_AVAILABILITY_ZONE)},
    'DisableApiTermination': False,
    'EbsOptimized': False,
    'InstanceInitiatedShutdownBehavior': 'stop' }
class EC2Instance(object):
    """Wrapper around a boto ec2 instance providing methods for testing.

    Methods provided simplify integration tests and validation.
    """

    # True once cloud-init has been observed to complete on the instance.
    _cloud_init_ran = False

    def __init__(self, instance, pubkey, artifact_dir=REMOTE_ARTIFACT_DIR,
                 verbose=False):
        """Initialize this EC2Instance helper class.

        @param instance: ec2 boto3 instance object.
        @param pubkey: Local path to the pubkey we'll use when contacting the
            instance.
        @param artifact_dir: The remote directory in which script results can
            be stored. This value is provided via runcmd as an environment
            variable $SCRIPT_ARTIFACT_DIR.
        @param verbose: Boolean, set True to see commands sent to and from
            the instance.
        """
        self.instance = instance
        self.pubkey = pubkey
        self.artifact_dir = artifact_dir
        self.verbose = verbose

    def scp(self, source_path, dest_path):
        """Scp files or dirs to an instance.

        The literal token '{INSTANCE}' in either path is replaced with
        ubuntu@<public-dns-name> of this instance.

        @param source_path: Local path or '{INSTANCE}:<path>' source.
        @param dest_path: Local path or '{INSTANCE}:<path>' destination.
        """
        hostname = self.instance.meta.data['PublicDnsName']
        instance_user_at_host = 'ubuntu@{}'.format(hostname)
        source_path = source_path.replace('{INSTANCE}', instance_user_at_host)
        dest_path = dest_path.replace('{INSTANCE}', instance_user_at_host)
        cmd = ['scp', '-i', self.pubkey, '-o', 'StrictHostKeyChecking=no',
               '-o', 'UserKnownHostsFile=/dev/null', source_path, dest_path]
        check_output(cmd)

    def runcmd(self, command, pipe_in=None, pipe_out=False):
        """Run a command over ssh on the instance.

        @param command: The string of command(s) to execute on the remote
            instance.
        @param pipe_in: Optional Popen process to take as stdin to the
            command.
        @param pipe_out: Optionally, whether to return the popen process
            spawned to allow subprocess.PIPEs to use the stdout of the
            command.
        @returns: The spawned Popen process when pipe_out is True, otherwise
            the (stdout, stderr) tuple from the completed command.
        """
        hostname = self.instance.meta.data['PublicDnsName']
        stdin = None
        if pipe_in:
            stdin = pipe_in.stdout
        ssh_cmd = [
            'ssh', '-i', self.pubkey, '-o', 'StrictHostKeyChecking=no', '-o',
            'UserKnownHostsFile=/dev/null', 'ubuntu@{}'.format(hostname), '--',
            'SCRIPT_ARTIFACT_DIR={}'.format(self.artifact_dir)]
        proc = Popen(ssh_cmd + [command], stdin=stdin, stdout=PIPE)
        if pipe_in:
            pipe_in.stdout.close()  # Allow p1 to receive SIGPIPE if p2 exits
        if pipe_out:
            return proc  # Caller needs to communicate and close return
        # Bugfix: previously the communicate() result was discarded and None
        # returned, so callers unpacking `out, err = runcmd(...)` crashed.
        return proc.communicate()

    def run_scripts(self, scripts_dir, artifacts_dir):
        """Push scripts_dir to the instance, run them and return artifacts.

        Provide SCRIPT_ARTIFACT_DIR env variable to scripts and copy any
        files in remote SCRIPT_ARTIFACT_DIR into local artifacts_dir.

        @param scripts_dir: Local path to a scripts directory which contains
            executable scripts which can be run by run-parts on the instance.
        @param artifacts_dir: Local path where script artifacts/results will
            be copied.
        """
        local_cmd = ['tar', '-czf', '-', scripts_dir]
        remote_artifact_basename = os.path.basename(self.artifact_dir)
        # Untar scripts_dir, run-parts scripts_dir, tar up self.artifact_dir
        remote_cmds = [
            'tar xzf - -C /tmp', 'mkdir -p {0}'.format(self.artifact_dir),
            'SCRIPT_ARTIFACT_DIR={0} run-parts /tmp/{1}'.format(
                self.artifact_dir, scripts_dir),
            'tar -czf - -C /tmp {}'.format(remote_artifact_basename)]
        proc1 = Popen(local_cmd, stdout=PIPE)  # Tar up local scripts_dir
        # Perform all remote_cmds in a single ssh interaction.
        proc2 = self.runcmd(
            '; '.join(remote_cmds), pipe_in=proc1, pipe_out=True)
        # Untar self.artifact_dir locally
        proc3 = Popen(['tar', '-xzf', '-'], stdin=proc2.stdout, stdout=PIPE)
        proc2.stdout.close()  # Allow proc2 to receive SIGPIPE if proc3 exits
        proc3.communicate()
        # Move the remote_artifacts dirname to local artifacts_dir
        shutil.move(remote_artifact_basename, artifacts_dir)

    def wait_on_cloud_init(self):
        """Block until cloud-init writes /run/cloud-init/result.json."""
        print("Waiting on cloud-init")
        self.runcmd(
            "while [ ! -f '/run/cloud-init/result.json' ]; do"
            " echo -n '.'; sleep 1; done")
        # Mark completion only after the remote wait loop has finished.
        self._cloud_init_ran = True

    def update_proposed_cloud_init(self):
        """Update cloud-init package to the version present in proposed.

        @raises RuntimeError: When the ubuntu series cannot be determined
            from the instance's image name.
        """
        match = re.match(
            r'.*ubuntu-(?P<series>[^-]*).*', self.instance.image.name)
        if not match:
            raise RuntimeError(
                'Could not determine ubuntu series from image: {}'.format(
                    self.instance.image.name))
        series = match.group('series')
        self.runcmd(
            "egrep 'deb .*{0} main' /etc/apt/sources.list |"
            " sed 's/{0}/{0}-proposed/' > /tmp/cloud-init-proposed.list;"
            " sudo mv /tmp/cloud-init-proposed.list /etc/apt/sources.list.d/;"
            " sudo apt-get update;"
            " sudo DEBIAN_FRONTEND=noninteractive apt-get install cloud-init"
            " --yes".format(series))

    def clean(self, reboot=False):
        """Clean cloud-init artifacts from the system making it look 'new'.

        A cleaned system allows cloud-init to boot as if it is seeing the
        instance on first boot.

        @param reboot: When True, also reboot the cleaned instance and wait
            for cloud-init to complete again.
        """
        clean_cmd = 'sudo rm -Rf /var/lib/cloud /var/log/cloud*'
        self.runcmd(clean_cmd)
        # NOTE(review): the flag stays True even though cloud-init state was
        # just wiped -- confirm whether this should be reset to False.
        self._cloud_init_ran = True
        if reboot:
            print("Waiting for EC2 instance clean reboot")
            self.reboot()

    def destroy(self):
        """Stop and destroy the instance from EC2."""
        self.instance.terminate()

    def reboot(self):
        """Reboot the instance and wait for cloud-init to complete.

        (Docstring fixed: it previously said 'Stop and destroy'.)
        """
        self.instance.reboot()
        sleep(8)  # Give the instance time to start going down before polling
        self.instance.reload()
        self.wait_on_cloud_init()

    def start(self):
        """Start the instance running in EC2."""
        self.instance.start()
        self.wait_on_cloud_init()

    def stop(self):
        """Stop the instance from running in EC2."""
        self.instance.stop()
def check_deps(packages):
    """Exit via error() if any of the given debian packages is missing.

    @param packages: Iterable of debian package names to verify via dpkg.
    """
    missing = [
        pkg for pkg in packages
        if call(['dpkg', '-l', pkg], stdout=DEVNULL, stderr=STDOUT) != 0]
    if missing:
        error('Missing packages: apt install {0}'.format(' '.join(missing)))
def get_ec2_image_id(series_name, region_name, root_store_name='ssd',
                     virt_type='hvm'):
    """Return a valid image id for the given series and region_name.

    Parses the output of the external `image-status ec2` command, whose
    data lines contain: series version region root_store virt ami_id.

    @param series_name: Ubuntu series to match (e.g. 'xenial').
    @param region_name: EC2 region name to match (e.g. 'us-east-2').
    @param root_store_name: Root volume store type to match.
    @param virt_type: Virtualization type to match.
    @raises SystemExit: When image-status is unavailable or fails.
    @raises RuntimeError: When no matching image id is found.
    """
    try:
        output = check_output(['image-status', 'ec2']).decode('utf-8')
    except (FileNotFoundError, CalledProcessError):
        print('Could not find image-status command. Try'
              ' git clone https://github.com/smoser/talk-simplestreams.git')
        sys.exit(1)
    for line in output.splitlines():
        fields = line.split()
        if len(fields) != 6:
            continue  # Skip headers/blank/malformed lines instead of crashing
        series, _version, region, root_store, virt, ami_id = fields
        if (series == series_name and region == region_name and
                root_store == root_store_name and virt == virt_type):
            return ami_id
    raise RuntimeError("Couldn't find an image id for series {} in {}".format(
        series_name, region_name))
def get_parser():
    """Return an argument parser for this command."""
    parser = ArgumentParser(description=__doc__)
    parser.add_argument(
        '-i', '--image-id', type=str, help='Specify the EC2 ami id to deploy')
    parser.add_argument(
        '--deb-file', type=str, required=False, dest='deb_file',
        help='Provide a local deb for install on the instance')
    parser.add_argument(
        '--script-dir', type=str, dest='script_dir',
        help=('Specify a directory of scripts to run on the instance.'
              ' Scripts should use SCRIPT_ARTIFACT_DIR env variable for'
              ' output.'))
    parser.add_argument(
        '--pubkey-file', required=False, type=str, dest='pubkey_file',
        default=os.path.expanduser('~/.ssh/id_rsa.pub'),
        # Fixed: help text previously duplicated --script-artifacts-dir's.
        help=('Specify a local ssh public key file installed on the instance'
              ' for login access [default=~/.ssh/id_rsa.pub]'))
    parser.add_argument(
        '--script-artifacts-dir', type=str, dest='artifacts_dir',
        default=DEFAULT_LOCAL_ARTIFACT_DIR,
        help=('Specify a local directory where results of script output'
              ' artifacts are saved'))
    parser.add_argument(
        '--destroy', action='store_true', default=False,
        help='Destroy instance when done with the test.')
    parser.add_argument(
        '-p', '--proposed', action='store_true', default=False,
        help='Update instance cloud-init to the version in <series>-proposed.')
    parser.add_argument(
        '-r', '--region', type=str, choices=EC2_REGIONS,
        default=DEFAULT_REGION,
        help='Specify a region to deploy to [default=%s]' % DEFAULT_REGION)
    parser.add_argument(
        '-z', '--zone', type=str, choices=['a', 'b', 'c'],
        default=DEFAULT_AVAILABILITY_ZONE,
        help='Specify a zone to deploy to [default=%s]' % (
            DEFAULT_AVAILABILITY_ZONE))
    parser.add_argument(
        '-u', '--user-data-file', dest='user_data_file', type=str,
        help='Optional user-data file to run during instance initialization')
    parser.add_argument(
        '-s', '--series', type=str, default=DEFAULT_SERIES,
        help='The default ubuntu series name to launch. [default={}]'.format(
            DEFAULT_SERIES))
    parser.add_argument(
        '-v', '--verbose', action='store_true', default=False,
        help='Print more information about the operations performed.')
    parser.add_argument(
        '-c', '--clean', action='store_true', default=False,
        # Fixed: missing space produced 're-run froma pseudo-clean'.
        help=('Remove cloud-init artifacts and reboot system to re-run from'
              ' a pseudo-clean system boot.'))
    return parser
def ec2_import_keypair(ec2, key_path):
    """Create a new key in EC2 if not present using the pubkey at key_path.

    @param ec2: boto3 ec2 service resource.
    @param key_path: Local path to the public key file to import.
    @returns: ec2.KeyPairInfo
    """
    try:
        existing = list(ec2.key_pairs.filter(KeyNames=[KEY_PAIR_NAME]))
        if existing:
            return existing[0]  # We already have the key
    except ClientError as e:
        # A missing keypair is expected on first run; anything else is fatal.
        if e.response['Error']['Code'] != 'InvalidKeyPair.NotFound':
            raise
    with open(key_path, 'rb') as stream:
        key_material = stream.read()
    return ec2.import_key_pair(
        KeyName=KEY_PAIR_NAME, PublicKeyMaterial=key_material)
def ec2_create_vpc(ec2, region, zone, tag=None):
    """Create our tagged VPC and subnet for integration testing.

    Resources tagged Name=tag are looked up first and reused when present.

    @param ec2: boto3 ec2 service resource.
    @param region: EC2 region name, e.g. 'us-east-2'.
    @param zone: Availability zone letter, e.g. 'b'.
    @param tag: Optional Name tag value; defaults to DEFAULT_TAG.
    @returns: Tuple of (vpc, subnet, route_table) boto3 resources.
    """
    if not tag:
        tag = DEFAULT_TAG
    region_zone = '{0}{1}'.format(region, zone)
    ec2_client = boto3.client('ec2', region_name=region)
    vpcs = ec2_client.describe_vpcs(
        Filters=[{'Name': 'tag:Name', 'Values': [tag]}])
    if vpcs['Vpcs']:
        vpc = ec2.Vpc(vpcs['Vpcs'][0]['VpcId'])
    else:
        vpc = ec2.create_vpc(
            CidrBlock='10.41.0.0/16', AmazonProvidedIpv6CidrBlock=True)
        vpc.create_tags(Tags=[{'Key': 'Name', 'Value': tag}])
        ec2_client.modify_vpc_attribute(
            EnableDnsHostnames={'Value': True}, VpcId=vpc.id)
        ec2_client.modify_vpc_attribute(
            EnableDnsSupport={'Value': True}, VpcId=vpc.id)
    vpc.wait_until_available()
    gws = [gw for gw in vpc.internet_gateways.all()]
    if gws:
        gw = gws[0]
    else:
        gw = ec2.create_internet_gateway()
        vpc.attach_internet_gateway(InternetGatewayId=gw.id)
    subnets = ec2_client.describe_subnets(
        Filters=[{'Name': 'tag:Name', 'Values': [tag]}])
    if subnets['Subnets']:
        subnet = ec2.Subnet(subnets['Subnets'][0]['SubnetId'])
    else:
        # Carve a /64 ipv6 subnet out of the VPC's amazon-provided block.
        ipv6_subnet_cidr = (
            vpc.ipv6_cidr_block_association_set[0]['Ipv6CidrBlock'])
        ipv6_subnet_cidr = ipv6_subnet_cidr[:-2] + '64'
        subnet = vpc.create_subnet(
            CidrBlock='10.41.41.0/24', Ipv6CidrBlock=ipv6_subnet_cidr,
            AvailabilityZone=region_zone)
        subnet.create_tags(Tags=[{'Key': 'Name', 'Value': tag}])
    route_tables = [rt for rt in vpc.route_tables.filter(
        Filters=[{'Name': 'tag:Name', 'Values': [tag]}])]
    if route_tables:
        route_table = route_tables[0]
    else:
        route_table = vpc.create_route_table()
        route_table.create_tags(Tags=[{'Key': 'Name', 'Value': tag}])
        # Default routes for ipv4 and ipv6 through the internet gateway.
        # (Return values were previously bound to unused locals; dropped.)
        route_table.create_route(
            DestinationCidrBlock='0.0.0.0/0', GatewayId=gw.id)
        route_table.create_route(
            DestinationIpv6CidrBlock='::/0', GatewayId=gw.id)
    route_table.associate_with_subnet(SubnetId=subnet.id)
    return vpc, subnet, route_table
def ec2_create_secgroup(ec2, vpc_id, sec_group_name=None, ipv4_cidr=None,
                        ipv6_cidr=None, tag=None):
    """Create cloud-init's test sec group with ssh ingress from ipv4_cidr.

    An existing group tagged Name=tag is returned instead of creating a new
    one. Note: the ipv6 ingress rule is currently disabled, so ipv6_cidr is
    presently unused.

    @param ec2: boto3 ec2 service resource.
    @param vpc_id: Id of the VPC the group belongs to.
    @param sec_group_name: Optional group name; default SECGROUP_NAME.
    @param ipv4_cidr: Optional ipv4 CIDR granted ssh ingress; default any.
    @param ipv6_cidr: Optional ipv6 CIDR; see note above.
    @param tag: Optional Name tag value; default DEFAULT_TAG.
    @returns: ec2.SecurityGroup
    """
    if sec_group_name is None:
        sec_group_name = SECGROUP_NAME
    if ipv4_cidr is None:
        ipv4_cidr = '0.0.0.0/0'
    if ipv6_cidr is None:
        ipv6_cidr = '::/0'
    if not tag:
        tag = DEFAULT_TAG
    matches = list(ec2.security_groups.filter(
        Filters=[{'Name': 'tag-key', 'Values': ['Name']},
                 {'Name': 'tag-value', 'Values': [tag]}]))
    if matches:
        return matches[0]
    secgroup = ec2.create_security_group(
        GroupName=sec_group_name, Description=SECGROUP_DESCRIPTION,
        VpcId=vpc_id)
    secgroup.create_tags(Tags=[{'Key': 'Name', 'Value': tag}])
    ssh_ingress = {
        'FromPort': 22, 'ToPort': 22, 'IpProtocol': 'tcp',
        'IpRanges': [{'CidrIp': ipv4_cidr}]}
    # Disabled ipv6 rule: 'Ipv6Ranges': [{'CidrIpv6': ipv6_cidr}]
    secgroup.authorize_ingress(IpPermissions=[ssh_ingress])
    return secgroup
def ec2_create_instance(ec2, ami_id, region_zone, secgroup, subnet,
                        user_data_file=None):
    """Create an instance and wait for started state.

    @param ec2: boto3 ec2 service resource.
    @param ami_id: The image id to deploy.
    @param region_zone: Availability zone string, e.g. 'us-east-2b'.
    @param secgroup: ec2.SecurityGroup to attach to the network interface.
    @param subnet: ec2.Subnet in which the instance is launched.
    @param user_data_file: Optional local path to user-data provided to
        cloud-init during instance initialization.
    @raises RuntimeError: When user_data_file does not exist.
    @returns: The created ec2.Instance in 'running' state.
    """
    from copy import deepcopy  # Function-local to avoid touching module deps
    # Bugfix: deep-copy the template instead of mutating the shared
    # module-level CREATE_INSTANCE_DEFAULTS dict (and its nested
    # NetworkInterfaces/Placement structures) in place.
    kwargs = deepcopy(CREATE_INSTANCE_DEFAULTS)
    kwargs['ImageId'] = ami_id
    kwargs['NetworkInterfaces'][0]['Groups'] = [secgroup.id]
    kwargs['NetworkInterfaces'][0]['SubnetId'] = subnet.id
    kwargs['Placement']['AvailabilityZone'] = region_zone
    if user_data_file:
        if not os.path.exists(user_data_file):
            raise RuntimeError(
                'user-data file {} does not exist'.format(user_data_file))
        with open(user_data_file) as stream:
            kwargs['UserData'] = stream.read()
    [instance] = ec2.create_instances(**kwargs)
    # block until running
    print('Waiting for EC2 instance initialization')
    instance.wait_until_running(
        Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
    instance.reload()
    print('Found EC2 instance: ssh ubuntu@{}'.format(
        instance.meta.data['PublicDnsName']))
    return instance
def error(message):
    """Write an 'ERROR: <message>' line to stderr and exit with status 1.

    @param message: The error text to report.
    """
    print('ERROR: {0}'.format(message), file=sys.stderr)
    sys.exit(1)
def main():
    """Parse arguments, provision EC2 resources and exercise the instance.

    @returns: 0 on success; failures exit earlier via error()/sys.exit.
    """
    cli = get_parser()
    args = cli.parse_args()
    check_deps(['awscli', 'simplestreams'])
    if not os.path.isfile(args.pubkey_file):
        error("--pubkey-file=%s: not a file\n" % args.pubkey_file)
    # Resolve the AMI from series/region when none was given explicitly.
    image_id = args.image_id
    if not image_id:
        image_id = get_ec2_image_id(args.series, args.region)
    ec2 = boto3.resource('ec2', region_name=args.region)
    region_zone = '{0}{1}'.format(args.region, args.zone)
    try:
        vpc, subnet, _route_table = ec2_create_vpc(
            ec2, region=args.region, zone=args.zone)
        secgroup = ec2_create_secgroup(ec2, vpc.id)
        ec2_import_keypair(ec2, args.pubkey_file)
        instance = ec2_create_instance(
            ec2, image_id, region_zone, secgroup, subnet, args.user_data_file)
    except NoCredentialsError:
        error('Credentials undefined or incorrect. Check ~/.aws/credentials')
    test_instance = EC2Instance(instance, args.pubkey_file)
    if args.deb_file:
        # Push the deb onto the instance and install it with its schema dep.
        test_instance.scp(args.deb_file, '{INSTANCE}:.')
        deb_name = os.path.basename(args.deb_file)
        test_instance.runcmd(
            'sudo apt-get install --yes python3-jsonschema;'
            ' sudo dpkg -i ./{}'.format(deb_name))
    if args.proposed:
        test_instance.update_proposed_cloud_init()
    if args.clean:
        test_instance.clean(reboot=True)
    if args.script_dir:
        test_instance.run_scripts(args.script_dir, args.artifacts_dir)
    if args.destroy:
        test_instance.destroy()
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main())