diff --git a/Makefile b/Makefile
index b934946b..d1ac7238 100644
--- a/Makefile
+++ b/Makefile
@@ -15,4 +15,4 @@ test:
 devtest:
 	# Unit testing with the -x option, aborts testing after first failure
 	# Useful for development when tests are long
-	py.test -x --pyargs cloudknot --cov-report term-missing --cov=cloudknot --fulltrace
+	py.test -x --pyargs cloudknot --cov-report term-missing --cov=cloudknot
diff --git a/cloudknot/aws/base_classes.py b/cloudknot/aws/base_classes.py
index 9f7e8112..d1f731eb 100644
--- a/cloudknot/aws/base_classes.py
+++ b/cloudknot/aws/base_classes.py
@@ -11,10 +11,16 @@
 from ..config import get_config_file
 
-__all__ = ["ResourceDoesNotExistException", "ResourceClobberedException",
-           "ResourceExistsException", "CannotDeleteResourceException",
-           "NamedObject", "ObjectWithArn", "ObjectWithUsernameAndMemory",
-           "clients", "wait_for_compute_environment", "wait_for_job_queue"]
+__all__ = [
+    "ResourceDoesNotExistException", "ResourceClobberedException",
+    "ResourceExistsException", "CannotDeleteResourceException",
+    "RegionException",
+    "NamedObject", "ObjectWithArn", "ObjectWithUsernameAndMemory",
+    "clients", "refresh_clients",
+    "wait_for_compute_environment", "wait_for_job_queue",
+    "get_region", "set_region",
+    "get_profile", "set_profile", "list_profiles",
+]
 
 mod_logger = logging.getLogger(__name__)
 
@@ -356,7 +362,7 @@ def __init__(self, message, resource_id):
         resource_id : string
             The resource ID (e.g. ARN, VPC-ID) of the requested resource
         """
-        super(ResourceDoesNotExistException, self).__init__(message)
+        super(ResourceClobberedException, self).__init__(message)
 
         self.resource_id = resource_id
 
@@ -377,6 +383,28 @@ def __init__(self, message, resource_id):
         self.resource_id = resource_id
 
 
+# noinspection PyPropertyAccess,PyAttributeOutsideInit
+class RegionException(Exception):
+    """Exception indicating that an AWS resource's region does not match
+    the current region"""
+    def __init__(self, resource_region):
+        """Initialize the Exception
+
+        Parameters
+        ----------
+        resource_region : string
+            The resource region
+        """
+        super(RegionException, self).__init__(
+            "This resource's region ({resource:s}) does not match the "
+            "current region ({current:s})".format(
+                resource=resource_region, current=get_region()
+            )
+        )
+        self.current_region = get_region()
+        self.resource_region = resource_region
+
+
 # noinspection PyPropertyAccess,PyAttributeOutsideInit
 class NamedObject(object):
     """Base class for building objects with name property"""
@@ -390,9 +418,11 @@ def __init__(self, name):
         """
         self._name = str(name)
         self._clobbered = False
+        self._region = get_region()
 
     name = property(operator.attrgetter('_name'))
     clobbered = property(operator.attrgetter('_clobbered'))
+    region = property(operator.attrgetter('_region'))
 
 
 # noinspection PyPropertyAccess,PyAttributeOutsideInit
@@ -412,9 +442,7 @@ def __init__(self, name):
         super(ObjectWithArn, self).__init__(name=name)
         self._arn = None
 
-    @property
-    def arn(self):
-        return self._arn
+    arn = property(operator.attrgetter('_arn'))
 
 
 # noinspection PyPropertyAccess,PyAttributeOutsideInit
@@ -432,11 +460,11 @@ def __init__(self, name, memory=32000, username='cloudknot-user'):
             Name of the object
 
         memory : int
-            memory (MiB) to be used for this job definition
+            memory (MiB) to be used for this resource
             Default: 32000
 
         username : string
-            username to be used for this job definition
+            username to be used for this resource
             Default: cloudknot-user
         """
         super(ObjectWithUsernameAndMemory, self).__init__(name=name)
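Taken together, the base-class changes above pin every resource to the region that was active when it was constructed: NamedObject snapshots get_region() into self._region, and region-sensitive methods in the subclasses (in the files that follow) compare that snapshot against the current region before acting. A minimal sketch of the intended behavior, mirroring the pattern the tests below use (jd stands for any previously created JobDefinition-like resource; region names are illustrative):

    import cloudknot as ck

    old_region = ck.get_region()        # e.g. 'us-east-1'
    ck.set_region(region='us-east-2')   # switch the active region

    try:
        jd.clobber()                    # guarded methods re-check the region
    except ck.aws.RegionException as e:
        # e.resource_region is the region jd was created in ('us-east-1');
        # e.current_region is the now-active region ('us-east-2')
        ck.set_region(region=old_region)
        jd.clobber()                    # succeeds in the matching region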
diff --git a/cloudknot/aws/batch.py b/cloudknot/aws/batch.py
index 96deffd6..e0ca6d05 100644
--- a/cloudknot/aws/batch.py
+++ b/cloudknot/aws/batch.py
@@ -8,10 +8,10 @@
 from collections import namedtuple
 
 from .base_classes import NamedObject, ObjectWithArn, \
-    ObjectWithUsernameAndMemory, clients, \
+    ObjectWithUsernameAndMemory, clients, RegionException, \
     ResourceExistsException, ResourceDoesNotExistException, \
     ResourceClobberedException, CannotDeleteResourceException, \
-    wait_for_job_queue
+    wait_for_job_queue, get_region
 from .ec2 import Vpc, SecurityGroup
 from .ecr import DockerRepo
 from .iam import IamRole
@@ -109,8 +109,9 @@ def __init__(self, arn=None, name=None, job_role=None, docker_image=None,
             self._arn = resource.arn
 
             # Add to config file
+            self._section_name = 'job-definitions ' + self.region
             cloudknot.config.add_resource(
-                'job-definitions', self.name, self.arn
+                self._section_name, self.name, self.arn
             )
 
             mod_logger.info(
@@ -295,7 +296,8 @@ def _create(self):
         arn = response['jobDefinitionArn']
 
         # Add this job def to the list of job definitions in the config file
-        cloudknot.config.add_resource('job-definitions', self.name, arn)
+        self._section_name = 'job-definitions ' + self.region
+        cloudknot.config.add_resource(self._section_name, self.name, arn)
 
         mod_logger.info('Created AWS batch job definition {name:s}'.format(
             name=self.name
@@ -310,10 +312,13 @@ def clobber(self):
         -------
         None
         """
+        if self.region != get_region():
+            raise RegionException(resource_region=self.region)
+
         clients['batch'].deregister_job_definition(jobDefinition=self.arn)
 
         # Remove this job def from the list of job defs in the config file
-        cloudknot.config.remove_resource('job-definitions', self.name)
+        cloudknot.config.remove_resource(self._section_name, self.name)
 
         # Set the clobbered parameter to True,
         # preventing subsequent method calls
@@ -475,8 +480,9 @@ def __init__(self, arn=None, name=None, batch_service_role=None,
             self._bid_percentage = resource.bid_percentage
             self._arn = resource.arn
 
+            self._section_name = 'compute-environments ' + self.region
             cloudknot.config.add_resource(
-                'compute-environments', self.name, self.arn
+                self._section_name, self.name, self.arn
             )
 
             mod_logger.info(
@@ -857,7 +863,8 @@ def _create(self):
         arn = response['computeEnvironmentArn']
 
         # Add this compute env to the list of compute envs in the config file
-        cloudknot.config.add_resource('compute-environments', self.name, arn)
+        self._section_name = 'compute-environments ' + self.region
+        cloudknot.config.add_resource(self._section_name, self.name, arn)
 
         mod_logger.info('Created compute environment {name:s}'.format(
             name=self.name
@@ -872,6 +879,9 @@ def clobber(self):
         -------
         None
         """
+        if self.region != get_region():
+            raise RegionException(resource_region=self.region)
+
         retry = tenacity.Retrying(
             wait=tenacity.wait_exponential(max=32),
             stop=tenacity.stop_after_delay(60),
@@ -925,9 +935,8 @@ def clobber(self):
                 resource_id=associated_queues
             )
 
-        # Remove this compute env from the list of compute envs
-        # in config file
-        cloudknot.config.remove_resource('compute-environments', self.name)
+        # Remove this compute env from the list of compute envs in config file
+        cloudknot.config.remove_resource(self._section_name, self.name)
 
         # Set the clobbered parameter to True,
         # preventing subsequent method calls
@@ -1002,7 +1011,10 @@ def __init__(self, arn=None, name=None, compute_environments=None,
             self._priority = resource.priority
             self._arn = resource.arn
 
-            cloudknot.config.add_resource('job-queues', self.name, self.arn)
+            self._section_name = 'job-queues ' + self.region
+            cloudknot.config.add_resource(
+                self._section_name, self.name, self.arn
+            )
 
             mod_logger.info('Retrieved pre-existing job queue {name:s}'.format(
                 name=self.name
@@ -1148,7 +1160,8 @@ def _create(self):
         wait_for_job_queue(name=self.name, max_wait_time=180)
 
         # Add this job queue to the list of job queues in the config file
-        cloudknot.config.add_resource('job-queues', self.name, arn)
+        self._section_name = 'job-queues ' + self.region
+        cloudknot.config.add_resource(self._section_name, self.name, arn)
 
         mod_logger.info('Created job queue {name:s}'.format(name=self.name))
 
@@ -1174,6 +1187,9 @@ def get_jobs(self, status='ALL'):
                 self.arn
             )
 
+        if self.region != get_region():
+            raise RegionException(resource_region=self.region)
+
         # Validate input
         allowed_statuses = ['ALL', 'SUBMITTED', 'PENDING', 'RUNNABLE',
                             'STARTING', 'RUNNING', 'SUCCEEDED', 'FAILED']
@@ -1199,6 +1215,9 @@ def clobber(self):
         -------
         None
         """
+        if self.region != get_region():
+            raise RegionException(resource_region=self.region)
+
         # First, disable submissions to the queue
         retry = tenacity.Retrying(
             wait=tenacity.wait_exponential(max=32),
@@ -1233,7 +1252,7 @@ def clobber(self):
         retry.call(clients['batch'].delete_job_queue, jobQueue=self.arn)
 
         # Remove this job queue from the list of job queues in config file
-        cloudknot.config.remove_resource('job-queues', self.name)
+        cloudknot.config.remove_resource(self._section_name, self.name)
 
         # Set the clobbered parameter to True,
         # preventing subsequent method calls
@@ -1305,7 +1324,10 @@ def __init__(self, job_id=None, name=None, job_queue=None,
             self._environment_variables = job.environment_variables
             self._job_id = job.job_id
 
-            cloudknot.config.add_resource('batch-jobs', self.job_id, self.name)
+            self._section_name = 'batch-jobs ' + self.region
+            cloudknot.config.add_resource(
+                self._section_name, self.job_id, self.name
+            )
 
             mod_logger.info('Retrieved pre-existing batch job {id:s}'.format(
                 id=self.job_id
@@ -1425,7 +1447,10 @@ def _create(self):  # pragma: nocover
         job_id = response['jobId']
 
         # Add this job to the list of jobs in the config file
-        cloudknot.config.add_resource('batch-jobs', self.job_id, self.name)
+        self._section_name = 'batch-jobs ' + self.region
+        cloudknot.config.add_resource(
+            self._section_name, self.job_id, self.name
+        )
 
         mod_logger.info(
             'Submitted batch job {name:s} with jobID '
@@ -1450,6 +1475,9 @@ def status(self):
                 self.job_id
             )
 
+        if self.region != get_region():
+            raise RegionException(resource_region=self.region)
+
         # Query the job_id
         response = clients['batch'].describe_jobs(jobs=[self.job_id])
         job = response.get('jobs')[0]
@@ -1481,6 +1509,9 @@ def terminate(self, reason):
                 self.job_id
             )
 
+        if self.region != get_region():
+            raise RegionException(resource_region=self.region)
+
         # Require the user to supply a reason for job termination
         if not isinstance(reason, six.string_types):
             raise ValueError('reason must be a string.')
@@ -1512,4 +1543,4 @@ def clobber(self):
         self._clobbered = True
 
         # Remove this job from the list of jobs in the config file
-        cloudknot.config.remove_resource('batch-jobs', self.job_id)
+        cloudknot.config.remove_resource(self._section_name, self.job_id)
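Because each resource now writes itself under a section named '<resource-type> <region>', one config file can track same-named resources in several regions without collision. An illustrative layout (names and ARNs hypothetical):

    [job-definitions us-east-1]
    my-jd = arn:aws:batch:us-east-1:123456789012:job-definition/my-jd:1

    [job-definitions us-east-2]
    my-jd = arn:aws:batch:us-east-2:123456789012:job-definition/my-jd:1

Since clobber() removes the entry from self._section_name, which was fixed when the resource was created or retrieved, clobbering a resource in one region never deletes the entry recorded for another.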
diff --git a/cloudknot/aws/ec2.py b/cloudknot/aws/ec2.py
index ec0a36a9..9bcab45e 100644
--- a/cloudknot/aws/ec2.py
+++ b/cloudknot/aws/ec2.py
@@ -78,7 +78,10 @@ def __init__(self, vpc_id=None, name=None, ipv4_cidr=None,
             self._instance_tenancy = resource.instance_tenancy
             self._subnet_ids = resource.subnet_ids
 
-            cloudknot.config.add_resource('vpc', self.vpc_id, self.name)
+            self._section_name = 'vpc ' + self.region
+            cloudknot.config.add_resource(
+                self._section_name, self.vpc_id, self.name
+            )
 
             mod_logger.info('Retrieved pre-existing VPC {id:s}'.format(
                 id=self.vpc_id
@@ -284,7 +287,8 @@ def _create(self):
         )
 
         # Add this VPC to the list of VPCs in the config file
-        cloudknot.config.add_resource('vpc', vpc_id, self.name)
+        self._section_name = 'vpc ' + self.region
+        cloudknot.config.add_resource(self._section_name, vpc_id, self.name)
 
         return vpc_id
 
@@ -379,7 +383,7 @@ def clobber(self):
         clients['ec2'].delete_vpc(VpcId=self.vpc_id)
 
         # Remove this VPC from the list of VPCs in the config file
-        cloudknot.config.remove_resource('vpc', self.vpc_id)
+        cloudknot.config.remove_resource(self._section_name, self.vpc_id)
 
         # Set the clobbered parameter to True,
         # preventing subsequent method calls
@@ -480,8 +484,9 @@ def __init__(self, security_group_id=None, name=None, vpc=None,
                     resource_id=self.security_group_id
                 )
 
+            self._section_name = 'security-groups ' + self.region
             cloudknot.config.add_resource(
-                'security-groups', self.security_group_id, self.name
+                self._section_name, self.security_group_id, self.name
             )
 
             mod_logger.info(
@@ -646,9 +651,9 @@ def _create(self):
             ]
         )
 
-        # Add this security group to the list of security groups in the
-        # config file
-        cloudknot.config.add_resource('security-groups', group_id, self.name)
+        # Add this security group to the config file
+        self._section_name = 'security-groups ' + self.region
+        cloudknot.config.add_resource(self._section_name, group_id, self.name)
 
         return group_id
 
@@ -695,7 +700,7 @@ def has_security_group(instance, sg_id):
 
         # Remove this security group from the config file
         cloudknot.config.remove_resource(
-            'security-groups', self.security_group_id
+            self._section_name, self.security_group_id
         )
 
         # Set the clobbered parameter to True,
diff --git a/cloudknot/aws/ecr.py b/cloudknot/aws/ecr.py
index 219ab28b..0b62eb94 100644
--- a/cloudknot/aws/ecr.py
+++ b/cloudknot/aws/ecr.py
@@ -35,8 +35,9 @@ def __init__(self, name):
         self._repo_registry_id = repo_info.registry_id
 
         # Add to config file
+        self._section_name = 'docker-repos ' + self.region
         cloudknot.config.add_resource(
-            'docker-repos', self.name, self.repo_uri
+            self._section_name, self.name, self.repo_uri
         )
 
         # Declare read only properties
@@ -102,7 +103,7 @@ def clobber(self):
             pass
 
         # Remove from the config file
-        cloudknot.config.remove_resource('docker-repos', self.name)
+        cloudknot.config.remove_resource(self._section_name, self.name)
 
         # Set the clobbered parameter to True,
         # preventing subsequent method calls
diff --git a/cloudknot/aws/iam.py b/cloudknot/aws/iam.py
index e047b7b4..60a776b2 100644
--- a/cloudknot/aws/iam.py
+++ b/cloudknot/aws/iam.py
@@ -392,7 +392,7 @@ def is_deleting(res):
                 clients['batch'].describe_compute_environments,
                 computeEnvironments=[ce['computeEnvironmentArn']]
             )
-        except tenacity.RetryError:
+        except tenacity.RetryError:  # pragma: nocover
             raise CannotDeleteResourceException(
                 'Could not delete this batch service role because it '
                 'is taking too long for a dependent compute '
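The cloudknot.config.add_resource / remove_resource helpers that every hunk above now calls with a region-suffixed section name are, in effect, thin configparser wrappers. A sketch of their assumed shape (the real implementations in cloudknot/config.py may lock the file or differ in detail):

    import configparser
    from cloudknot.config import get_config_file

    def add_resource(section, option, value):
        # Record option = value under section, creating the section if needed
        config = configparser.ConfigParser()
        config.read(get_config_file())
        if section not in config.sections():
            config.add_section(section)
        config.set(section, option, value)
        with open(get_config_file(), 'w') as f:
            config.write(f)

    def remove_resource(section, option):
        # Delete option from section, ignoring sections that do not exist
        config = configparser.ConfigParser()
        config.read(get_config_file())
        try:
            config.remove_option(section, option)
        except configparser.NoSectionError:
            pass
        with open(get_config_file(), 'w') as f:
            config.write(f)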
diff --git a/cloudknot/cloudknot.py b/cloudknot/cloudknot.py
index 39ea7d95..1d43c9ca 100644
--- a/cloudknot/cloudknot.py
+++ b/cloudknot/cloudknot.py
@@ -94,7 +94,7 @@ def __init__(self, name='default', batch_service_role_name=None,
         # Check for existence of this pars in the config file
         config = configparser.ConfigParser()
         config.read(get_config_file())
-        self._pars_name = 'pars ' + name
+        self._pars_name = 'pars ' + self.name
         if self._pars_name in config.sections():
             # Pars exists, check that user did not provide any resource names
             if any([batch_service_role_name, ecs_instance_role_name,
@@ -169,10 +169,10 @@ def __init__(self, name='default', batch_service_role_name=None,
 
             try:
                 # Use config values to adopt VPC if it exists already
-                id = config.get(self._pars_name, 'vpc')
-                self._vpc = aws.Vpc(vpc_id=id)
-                mod_logger.info('PARS {name:s} adopted VPC {id:s}'.format(
-                    name=name, id=id
+                vpcid = config.get(self._pars_name, 'vpc')
+                self._vpc = aws.Vpc(vpc_id=vpcid)
+                mod_logger.info('PARS {name:s} adopted VPC {vpcid:s}'.format(
+                    name=name, vpcid=vpcid
                 ))
             except aws.ResourceDoesNotExistException:
                 # Otherwise create the new VPC
@@ -182,19 +182,19 @@ def __init__(self, name='default', batch_service_role_name=None,
                 config.set(self._pars_name, 'vpc', self.vpc.vpc_id)
                 with open(get_config_file(), 'w') as f:
                     config.write(f)
-                mod_logger.info('PARS {name:s} created VPC {id:s}'.format(
-                    name=name, id=self.vpc.vpc_id
+                mod_logger.info('PARS {name:s} created VPC {vpcid:s}'.format(
+                    name=name, vpcid=self.vpc.vpc_id
                 ))
 
             try:
                 # Use config values to adopt security group if it exists
-                id = config.get(self._pars_name, 'security-group')
+                sgid = config.get(self._pars_name, 'security-group')
                 self._security_group = aws.SecurityGroup(
-                    security_group_id=id
+                    security_group_id=sgid
                 )
                 mod_logger.info(
-                    'PARS {name:s} adopted security group {id:s}'.format(
-                        name=name, id=id
+                    'PARS {name:s} adopted security group {sgid:s}'.format(
+                        name=name, sgid=sgid
                     )
                 )
             except aws.ResourceDoesNotExistException:
@@ -212,14 +212,22 @@ def __init__(self, name='default', batch_service_role_name=None,
                 with open(get_config_file(), 'w') as f:
                     config.write(f)
                 mod_logger.info(
-                    'PARS {name:s} created security group {id:s}'.format(
-                        name=name, id=self.security_group.security_group_id
+                    'PARS {name:s} created security group {sgid:s}'.format(
+                        name=name, sgid=self.security_group.security_group_id
                     )
                 )
 
-            # Save config to file
+            # Verify that the VPC and security group regions match
+            if not (self.vpc.region == self.security_group.region):
+                raise aws.RegionException(self.vpc.region)
+
+            # Set the PARS region to match the VPC and security group
+            self._region = self.vpc.region
             config = configparser.ConfigParser()
             config.read(get_config_file())
+            config.set(self._pars_name, 'region', self.region)
+
+            # Save config to file
             with open(get_config_file(), 'w') as f:
                 config.write(f)
         else:
@@ -326,22 +334,26 @@ def __init__(self, name='default', batch_service_role_name=None,
 
                 # Adopt the VPC
                 self._vpc = aws.Vpc(vpc_id=vpc_id)
-                mod_logger.info('PARS {name:s} adopted VPC {id:s}'.format(
-                    name=name, id=vpc_id
+                mod_logger.info('PARS {name:s} adopted VPC {vpcid:s}'.format(
+                    name=name, vpcid=vpc_id
                 ))
             else:
                 try:
                     # Create new VPC
                     self._vpc = aws.Vpc(name=vpc_name)
-                    mod_logger.info('PARS {name:s} created VPC {id:s}'.format(
-                        name=name, id=self.vpc.vpc_id
-                    ))
+                    mod_logger.info(
+                        'PARS {name:s} created VPC {vpcid:s}'.format(
+                            name=name, vpcid=self.vpc.vpc_id
+                        )
+                    )
                 except aws.ResourceExistsException as e:
                     # If it already exists, simply adopt it
                     self._vpc = aws.Vpc(vpc_id=e.resource_id)
-                    mod_logger.info('PARS {name:s} adopted VPC {id:s}'.format(
-                        name=name, id=e.resource_id
-                    ))
+                    mod_logger.info(
+                        'PARS {name:s} adopted VPC {vpcid:s}'.format(
+                            name=name, vpcid=e.resource_id
+                        )
+                    )
 
             if security_group_id:
                 # Validate security_group_id input
@@ -359,8 +371,8 @@ def __init__(self, name='default', batch_service_role_name=None,
                     security_group_id=security_group_id
                 )
                 mod_logger.info(
-                    'PARS {name:s} adopted security group {id:s}'.format(
-                        name=name, id=security_group_id
+                    'PARS {name:s} adopted security group {sgid:s}'.format(
+                        name=name, sgid=security_group_id
                     )
                 )
             else:
@@ -371,8 +383,9 @@ def __init__(self, name='default', batch_service_role_name=None,
                         vpc=self.vpc
                     )
                     mod_logger.info(
-                        'PARS {name:s} created security group {id:s}'.format(
-                            name=name, id=self.security_group.security_group_id
+                        'PARS {name:s} created security group {sgid:s}'.format(
+                            name=name,
+                            sgid=self.security_group.security_group_id
                         )
                     )
                 except aws.ResourceExistsException as e:
@@ -381,16 +394,24 @@ def __init__(self, name='default', batch_service_role_name=None,
                         security_group_id=e.resource_id
                     )
                     mod_logger.info(
-                        'PARS {name:s} adopted security group {id:s}'.format(
-                            name=name, id=e.resource_id
+                        'PARS {name:s} adopted security group {sgid:s}'.format(
+                            name=name, sgid=e.resource_id
                         )
                     )
 
+            # Verify that the VPC and security group regions match
+            if not (self.vpc.region == self.security_group.region):
+                raise aws.RegionException(self.vpc.region)
+
+            # Set the PARS region to match the VPC and security group
+            self._region = self.vpc.region
+
             # Save the new pars resources in config object
             # Use config.set() for python 2.7 compatibility
             config = configparser.ConfigParser()
             config.read(get_config_file())
             config.add_section(self._pars_name)
+            config.set(self._pars_name, 'region', self.region)
             config.set(
                 self._pars_name,
                 'batch-service-role', self._batch_service_role.name
@@ -412,7 +433,6 @@ def __init__(self, name='default', batch_service_role_name=None,
             with open(get_config_file(), 'w') as f:
                 config.write(f)
 
-    name = property(fget=operator.attrgetter('_name'))
     pars_name = property(fget=operator.attrgetter('_pars_name'))
 
     @staticmethod
@@ -512,6 +532,9 @@ def vpc(self, v):
         if not isinstance(v, aws.Vpc):
             raise ValueError('new vpc must be an instance of Vpc')
 
+        if v.region != self._vpc.region:
+            raise aws.RegionException(v.region)
+
         mod_logger.warning(
             'You are setting a new VPC for PARS {name:s}. The old '
             'VPC {vpc_id:s} will be clobbered.'.format(
@@ -542,8 +565,8 @@ def vpc(self, v):
             config.write(f)
 
         mod_logger.info(
-            'PARS {name:s} adopted new VPC {id:s}'.format(
-                name=self.name, id=self.vpc.vpc_id
+            'PARS {name:s} adopted new VPC {vpcid:s}'.format(
+                name=self.name, vpcid=self.vpc.vpc_id
             )
         )
 
@@ -574,6 +597,9 @@ def security_group(self, sg):
             raise ValueError('new security group must be an instance of '
                              'SecurityGroup')
 
+        if sg.region != self._security_group.region:
+            raise aws.RegionException(sg.region)
+
         mod_logger.warning(
            'You are setting a new security group for PARS {name:s}. The old '
            'security group {sg_id:s} will be clobbered.'.format(
@@ -592,8 +618,8 @@ def security_group(self, sg):
             config.write(f)
 
         mod_logger.info(
-            'PARS {name:s} adopted new security group {id:s}'.format(
-                name=self.name, id=sg.security_group_id
+            'PARS {name:s} adopted new security group {sgid:s}'.format(
+                name=self.name, sgid=sg.security_group_id
             )
         )
 
@@ -604,6 +630,9 @@ def clobber(self):
         -------
         None
         """
+        if self.region != aws.get_region():
+            raise aws.RegionException(self.region)
+
         # Delete all associated AWS resources
         self._security_group.clobber()
         self._vpc.clobber()
@@ -772,7 +801,7 @@ def __init__(self, name='default', **kwargs):
             pars_name = config.get(self._knot_name, 'pars')
             self._pars = Pars(name=pars_name)
             mod_logger.info('Knot {name:s} adopted PARS '
-                            '{id:s}'.format(name=self.name, id=self.pars.name))
+                            '{p:s}'.format(name=self.name, p=self.pars.name))
 
             image_name = config.get(self._knot_name, 'docker-image')
             self._docker_image = dockerimage.DockerImage(name=image_name)
@@ -866,13 +895,13 @@ def __init__(self, name='default', **kwargs):
                 if not isinstance(pars, Pars):
                     raise ValueError('pars must be a Pars instance.')
                 self._pars = pars
-                mod_logger.info('knot {name:s} adopted PARS {id:s}'.format(
-                    name=self.name, id=self.pars.name
+                mod_logger.info('knot {name:s} adopted PARS {p:s}'.format(
+                    name=self.name, p=self.pars.name
                 ))
             else:
                 self._pars = Pars()
-                mod_logger.info('knot {name:s} created PARS {id:s}'.format(
-                    name=self.name, id=self.pars.name
+                mod_logger.info('knot {name:s} created PARS {p:s}'.format(
+                    name=self.name, p=self.pars.name
                 ))
 
             # Create and build the docker image
@@ -886,8 +915,8 @@ def __init__(self, name='default', **kwargs):
 
             self.docker_image.build(tags=image_tags)
 
-            mod_logger.info('knot {name:s} built docker image {id!s}'.format(
-                name=self.name, id=self.docker_image.images
+            mod_logger.info('knot {name:s} built docker image {i!s}'.format(
+                name=self.name, i=self.docker_image.images
             ))
 
             # Create the remote repo
@@ -910,7 +939,7 @@ def __init__(self, name='default', **kwargs):
 
             mod_logger.info(
                 'knot {name:s} created/adopted docker repo '
-                '{id:s}'.format(name=self.name, id=self.docker_repo.name)
+                '{r:s}'.format(name=self.name, r=self.docker_repo.name)
             )
 
             # Push to remote repo
@@ -918,7 +947,7 @@ def __init__(self, name='default', **kwargs):
 
             mod_logger.info(
                 "knot {name:s} pushed its docker image to the repo "
-                "{id:s}".format(name=self.name, id=self.docker_repo.name)
+                "{r:s}".format(name=self.name, r=self.docker_repo.name)
             )
 
             try:
@@ -934,8 +963,8 @@ def __init__(self, name='default', **kwargs):
                 )
 
                 mod_logger.info(
-                    'knot {name:s} created job definition {id:s}'.format(
-                        name=self.name, id=self.job_definition.name
+                    'knot {name:s} created job definition {jd:s}'.format(
+                        name=self.name, jd=self.job_definition.name
                     )
                 )
 
@@ -984,8 +1013,8 @@ def __init__(self, name='default', **kwargs):
                 jd_cleanup = False
 
                 mod_logger.info(
-                    'knot {name:s} adopted job definition {id:s}'.format(
-                        name=self.name, id=self.job_definition.name
+                    'knot {name:s} adopted job definition {jd:s}'.format(
+                        name=self.name, jd=self.job_definition.name
                     )
                 )
 
@@ -1015,8 +1044,8 @@ def __init__(self, name='default', **kwargs):
                 )
 
                 mod_logger.info(
-                    'knot {name:s} created compute environment {id:s}'.format(
-                        name=self.name, id=self.compute_environment.name
+                    'knot {name:s} created compute environment {ce:s}'.format(
+                        name=self.name, ce=self.compute_environment.name
                    )
                )
 
@@ -1088,8 +1117,8 @@ def __init__(self, name='default', **kwargs):
                 # ce_cleanup logic same as for jd_cleanup
                 ce_cleanup = False
 
                 mod_logger.info(
-                    'knot {name:s} adopted compute environment {id:s}'.format(
-                        name=self.name, id=self.compute_environment.name
+                    'knot {name:s} adopted compute environment {ce:s}'.format(
+                        name=self.name, ce=self.compute_environment.name
                     )
                 )
 
@@ -1103,7 +1132,7 @@ def __init__(self, name='default', **kwargs):
 
                 mod_logger.info(
                     'knot {name:s} created job queue '
-                    '{id:s}'.format(name=self.name, id=self.job_queue.name)
+                    '{jq:s}'.format(name=self.name, jq=self.job_queue.name)
                 )
             except aws.ResourceExistsException as e:
                 # Job queue already exists, retrieve it
@@ -1140,7 +1169,7 @@ def __init__(self, name='default', **kwargs):
                 self._job_queue = jq
                 mod_logger.info(
                     'knot {name:s} adopted job queue '
-                    '{id:s}'.format(name=self.name, id=self.job_queue.name)
+                    '{jq:s}'.format(name=self.name, jq=self.job_queue.name)
                 )
 
             self._jobs = []
@@ -1151,6 +1180,7 @@ def __init__(self, name='default', **kwargs):
             config = configparser.ConfigParser()
             config.read(get_config_file())
             config.add_section(self._knot_name)
+            config.set(self._knot_name, 'region', self.region)
             config.set(self._knot_name, 'pars', self.pars.name)
             config.set(self._knot_name, 'docker-image', self.docker_image.name)
             config.set(self._knot_name, 'docker-repo', self.docker_repo.name)
@@ -1189,6 +1219,9 @@ def submit(self, commands, env_vars):
                 self.name
             )
 
+        if self.region != aws.get_region():
+            raise aws.RegionException(self.region)
+
         # commands should be a sequence of sequences of strings
         if not all(all(isinstance(s, six.string_types) for s in sublist)
                    for sublist in commands):
@@ -1244,6 +1277,9 @@ def get_jobs(self):
                 self.name
             )
 
+        if self.region != aws.get_region():
+            raise aws.RegionException(self.region)
+
         jobs_info = [
             {
                 'job': job,
@@ -1264,6 +1300,9 @@ def view_jobs(self):
                 self.name
             )
 
+        if self.region != aws.get_region():
+            raise aws.RegionException(self.region)
+
         order = {'SUBMITTED': 0, 'PENDING': 1, 'RUNNABLE': 2, 'STARTING': 3,
                  'RUNNING': 4, 'FAILED': 5, 'SUCCEEDED': 6}
         job_info = sorted(self.get_jobs(), key=lambda j: order[j['status']])
@@ -1289,6 +1328,9 @@ def clobber(self, clobber_pars=False):
         -------
         None
         """
+        if self.region != aws.get_region():
+            raise aws.RegionException(self.region)
+
         # Delete all associated AWS resources
         for job in self.jobs:
             job.clobber()
diff --git a/cloudknot/config.py b/cloudknot/config.py
index 63ba45d3..830ac801 100644
--- a/cloudknot/config.py
+++ b/cloudknot/config.py
@@ -116,13 +116,14 @@ def verify_sections():
     config.read(config_file)
     approved_sections = [
         'aws', 'roles', 'vpc', 'security-groups', 'docker-repos',
-        'job-definitions', 'compute-environments', 'job-queues', 'batch-jobs'
+        'job-definitions', 'compute-environments', 'job-queues', 'batch-jobs',
+        'pars', 'knot', 'docker-image'
     ]
 
     def section_approved(sec):
         return any([
             sec in approved_sections,
-            sec.split(' ', 1)[0] in ['pars', 'knot', 'docker-image']
+            sec.split(' ', 1)[0] in approved_sections
         ])
 
     for section in config.sections():
@@ -137,6 +138,9 @@ def prune():
     -------
     None
     """
+    raise NotImplementedError('prune is not yet implemented.')
+    # prune needs to be updated to use the region info in config
+
     verify_sections()
 
     config_file = get_config_file()
@@ -197,4 +201,4 @@ def prune():
         config.remove_option('jobs', job_id)
 
     # Prune pars
-    # Prune jars
+    # Prune knots
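With the region-suffixed names folded into approved_sections, section_approved now accepts any section whose prefix (the text before the first space) is a known resource type, not just the pars/knot/docker-image families. A small demonstration of the new predicate's behavior (sketch; section_approved is an inner function of verify_sections and not actually importable):

    approved_sections = [
        'aws', 'roles', 'vpc', 'security-groups', 'docker-repos',
        'job-definitions', 'compute-environments', 'job-queues', 'batch-jobs',
        'pars', 'knot', 'docker-image'
    ]

    def section_approved(sec):
        return any([
            sec in approved_sections,
            sec.split(' ', 1)[0] in approved_sections
        ])

    assert section_approved('docker-repos')                # exact match
    assert section_approved('docker-repos us-east-1')      # approved prefix
    assert not section_approved('mystery-section us-east-1')

Note that prune() is deliberately short-circuited with NotImplementedError until it is taught to iterate over these region-scoped sections.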
diff --git a/cloudknot/tests/test_aws.py b/cloudknot/tests/test_aws.py
index eab78685..182f4308 100644
--- a/cloudknot/tests/test_aws.py
+++ b/cloudknot/tests/test_aws.py
@@ -514,6 +514,7 @@ def test_IamRole():
         # Clobber the role
         role.clobber()
 
+        # Assert that it was removed from AWS
         with pytest.raises(iam.exceptions.NoSuchEntityException):
             iam.get_role(RoleName=name)
 
@@ -527,6 +528,10 @@ def test_IamRole():
         config.read(config_file)
         assert name not in config.options('roles')
 
+        # Assert that reading the instance_profile_arn property raises error
+        with pytest.raises(ck.aws.ResourceClobberedException):
+            instance_profile_arn = role.instance_profile_arn  # noqa: F841
+
         # Try to retrieve a role that does not exist
         name = get_testing_name()
         with pytest.raises(ck.aws.ResourceDoesNotExistException) as e:
@@ -678,7 +683,7 @@ def test_DockerRepo():
         # Confirm that the docker repo is in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert name in config.options('docker-repos')
+        assert name in config.options('docker-repos ' + ck.aws.get_region())
 
         # Clobber the docker repo
         dr.clobber()
@@ -694,7 +699,9 @@ def test_DockerRepo():
         # config and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert name not in config.options('docker-repos')
+        assert name not in config.options(
+            'docker-repos ' + ck.aws.get_region()
+        )
 
         # Now create a new repo using only cloudknot
         name = get_testing_name()
@@ -714,7 +721,7 @@ def test_DockerRepo():
         # Confirm that the docker repo is in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert name in config.options('docker-repos')
+        assert name in config.options('docker-repos ' + ck.aws.get_region())
 
         # Delete the repo from AWS before clobbering
         ecr.delete_repository(
@@ -735,7 +742,9 @@ def test_DockerRepo():
         # config and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert name not in config.options('docker-repos')
+        assert name not in config.options(
+            'docker-repos ' + ck.aws.get_region()
+        )
 
     except Exception as e:
         response = ecr.describe_repositories()
@@ -754,9 +763,10 @@ def test_DockerRepo():
         # Clean up config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        for name in config.options('docker-repos'):
+        for name in config.options('docker-repos ' + ck.aws.get_region()):
             if UNIT_TEST_PREFIX in name:
-                config.remove_option('docker-repos', name)
+                config.remove_option('docker-repos ' + ck.aws.get_region(),
+                                     name)
 
         with open(config_file, 'w') as f:
             config.write(f)
@@ -820,14 +830,22 @@ def test_Vpc():
         # Confirm that the VPC is in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert vpc_id in config.options('vpc')
+        assert vpc_id in config.options('vpc ' + ck.aws.get_region())
 
         # Clobber the VPC
         vpc.clobber()
 
+        retry = tenacity.Retrying(
+            wait=tenacity.wait_exponential(max=64),
+            stop=tenacity.stop_after_delay(120),
+            retry=tenacity.retry_unless_exception_type(
+                ec2.exceptions.ClientError
+            )
+        )
+
         # Assert that it was removed from AWS
         with pytest.raises(ec2.exceptions.ClientError) as e:
-            ec2.describe_vpcs(VpcIds=[vpc_id])
+            retry.call(ec2.describe_vpcs, VpcIds=[vpc_id])
 
         assert e.value.response.get('Error')['Code'] == 'InvalidVpcID.NotFound'
 
@@ -838,7 +856,7 @@ def test_Vpc():
         # config to None and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert vpc_id not in config.options('vpc')
+        assert vpc_id not in config.options('vpc ' + ck.aws.get_region())
 
         # Try to retrieve a VPC that does not exist
         vpc_id = get_testing_name()
@@ -867,26 +885,36 @@ def test_Vpc():
         # Confirm that they exist in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert vpc.vpc_id in config.options('vpc')
+        assert vpc.vpc_id in config.options('vpc ' + ck.aws.get_region())
 
-        # Clobber security group
+        # Clobber the VPC
         vpc.clobber()
 
-        # Assert that it was removed from AWS
-        with pytest.raises(ec2.exceptions.ClientError) as e:
-            ec2.describe_vpcs(VpcIds=[vpc.vpc_id])
-
-        error_code = e.value.response.get('Error')['Code']
-        assert error_code == 'InvalidVpcID.NotFound'
-
-        # Assert that they were removed from the config file
+        # Assert that it was removed from the config file
         # If we just re-read the config file, config will keep the union
         # of the in memory values and the file values, updating the
         # intersection of the two with the file values. So we must clear
         # config and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert vpc.vpc_id not in config.options('vpc')
+        assert vpc.vpc_id not in config.options(
+            'vpc ' + ck.aws.get_region()
+        )
+
+        retry = tenacity.Retrying(
+            wait=tenacity.wait_exponential(max=64),
+            stop=tenacity.stop_after_delay(120),
+            retry=tenacity.retry_unless_exception_type(
+                ec2.exceptions.ClientError
+            )
+        )
+
+        # Assert that it was removed from AWS
+        with pytest.raises(ec2.exceptions.ClientError) as e:
+            retry.call(ec2.describe_vpcs, VpcIds=[vpc.vpc_id])
+
+        error_code = e.value.response.get('Error')['Code']
+        assert error_code == 'InvalidVpcID.NotFound'
 
         # Create another vpc without a Name tag
         response = ec2.create_vpc(
@@ -979,7 +1007,8 @@ def test_Vpc():
 
             # Clean up config file
             try:
-                config.remove_option('vpc', vpc['VpcId'])
+                config.remove_option('vpc ' + ck.aws.get_region(),
+                                     vpc['VpcId'])
             except configparser.NoSectionError:
                 pass
 
@@ -1054,17 +1083,13 @@ def test_SecurityGroup():
         # Confirm that the security group is in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert group_id in config.options('security-groups')
+        assert group_id in config.options(
+            'security-groups ' + ck.aws.get_region()
+        )
 
         # Clobber the security group
         sg.clobber()
 
-        # Assert that it was removed from AWS
-        with pytest.raises(ec2.exceptions.ClientError) as e:
-            ec2.describe_security_groups(GroupIds=[group_id])
-
-        assert e.value.response.get('Error')['Code'] == 'InvalidGroup.NotFound'
-
         # Assert that it was removed from the config file
         # If we just re-read the config file, config will keep the union
         # of the in memory values and the file values, updating the
         # intersection of the two with the file values. So we must clear
         # config and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert group_id not in config.options('security-groups')
+        assert group_id not in config.options(
+            'security-groups ' + ck.aws.get_region()
+        )
+
+        retry = tenacity.Retrying(
+            wait=tenacity.wait_exponential(max=64),
+            stop=tenacity.stop_after_delay(120),
+            retry=tenacity.retry_unless_exception_type(
+                ec2.exceptions.ClientError
+            )
+        )
+
+        # Assert that it was removed from AWS
+        with pytest.raises(ec2.exceptions.ClientError) as e:
+            retry.call(ec2.describe_security_groups, GroupIds=[group_id])
+
+        assert e.value.response.get('Error')['Code'] == 'InvalidGroup.NotFound'
 
         # Try to retrieve a security group that does not exist
         group_id = get_testing_name()
@@ -1105,14 +1146,25 @@ def test_SecurityGroup():
         # Confirm that they exist in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert sg.security_group_id in config.options('security-groups')
+        assert sg.security_group_id in config.options(
+            'security-groups ' + ck.aws.get_region()
+        )
 
         # Clobber security group
         sg.clobber()
 
+        retry = tenacity.Retrying(
+            wait=tenacity.wait_exponential(max=64),
+            stop=tenacity.stop_after_delay(120),
+            retry=tenacity.retry_unless_exception_type(
+                ec2.exceptions.ClientError
+            )
+        )
+
         # Assert that it was removed from AWS
         with pytest.raises(ec2.exceptions.ClientError) as e:
-            ec2.describe_security_groups(GroupIds=[sg.security_group_id])
+            retry.call(ec2.describe_security_groups,
+                       GroupIds=[sg.security_group_id])
 
         error_code = e.value.response.get('Error')['Code']
         assert error_code == 'InvalidGroup.NotFound'
 
@@ -1125,7 +1177,7 @@ def test_SecurityGroup():
         config = configparser.ConfigParser()
         config.read(config_file)
         assert sg.security_group_id not in config.options(
-            'security-groups'
+            'security-groups ' + ck.aws.get_region()
         )
 
         # Test for correct handling of incorrect input
@@ -1169,7 +1221,8 @@ def test_SecurityGroup():
 
             # Clean up config file
             try:
-                config.remove_option('security-groups', sg['id'])
+                config.remove_option('security-groups ' + ck.aws.get_region(),
+                                     sg['id'])
             except configparser.NoSectionError:
                 pass
 
@@ -1186,7 +1239,8 @@ def test_SecurityGroup():
 
             # Clean up config file
             try:
-                config.remove_option('vpc', vpc['VpcId'])
+                config.remove_option('vpc ' + ck.aws.get_region(),
+                                     vpc['VpcId'])
             except configparser.NoSectionError:
                 pass
 
@@ -1259,7 +1313,19 @@ def test_JobDefinition(pars):
         # Confirm that the job definition is in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert name in config.options('job-definitions')
+        assert name in config.options('job-definitions ' + ck.aws.get_region())
+
+        # Assert that clobber raises RegionException if we change the region
+        old_region = ck.get_region()
+        if old_region != 'us-east-2':
+            ck.set_region(region='us-east-2')
+        else:
+            ck.set_region(region='us-east-1')
+
+        with pytest.raises(ck.aws.RegionException):
+            jd.clobber()
+
+        ck.set_region(region=old_region)
 
         # Clobber the job definition
         jd.clobber()
@@ -1276,7 +1342,9 @@ def test_JobDefinition(pars):
         # config and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert name not in config.options('job-definitions')
+        assert name not in config.options(
+            'job-definitions ' + ck.aws.get_region()
+        )
 
         # The previous job def should be INACTIVE, so try to retrieve it
         # and assert that we get a ResourceExistsException
@@ -1333,7 +1401,9 @@ def test_JobDefinition(pars):
         # Confirm that they exist in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert jd.name in config.options('job-definitions')
+        assert jd.name in config.options(
+            'job-definitions ' + ck.aws.get_region()
+        )
 
         # Clobber the job definition
         jd.clobber()
@@ -1350,7 +1420,9 @@ def test_JobDefinition(pars):
         # config and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert jd.name not in config.options('job-definitions')
+        assert jd.name not in config.options(
+            'job-definitions ' + ck.aws.get_region()
+        )
 
         # Test for correct handling of incorrect input
         with pytest.raises(ValueError) as e:
@@ -1429,7 +1501,8 @@ def test_JobDefinition(pars):
 
             # Clean up config file
             try:
-                config.remove_option('job-definitions', jd['name'])
+                config.remove_option('job-definitions ' + ck.aws.get_region(),
+                                     jd['name'])
             except configparser.NoSectionError:
                 pass
 
@@ -1520,7 +1593,9 @@ def test_ComputeEnvironment(pars):
         # Confirm that the compute environment is in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert name in config.options('compute-environments')
+        assert name in config.options(
+            'compute-environments ' + ck.aws.get_region()
+        )
 
         # Before clobbering, associate this compute environment with a
         # job queue in order to test the job queue disassociation statements
@@ -1537,6 +1612,23 @@ def test_ComputeEnvironment(pars):
 
         assert e.value.resource_id[0]['jobQueueName'] == jq.name
 
+        # Assert that IamRole raises exception if we try to delete the
+        # batch service role on which this compute environment is based
+        with pytest.raises(ck.aws.CannotDeleteResourceException):
+            pars.batch_service_role.clobber()
+
+        # Assert that clobber raises RegionException if we change the region
+        old_region = ck.get_region()
+        if old_region != 'us-east-2':
+            ck.set_region(region='us-east-2')
+        else:
+            ck.set_region(region='us-east-1')
+
+        with pytest.raises(ck.aws.RegionException):
+            ce.clobber()
+
+        ck.set_region(region=old_region)
+
         jq.clobber()
         ce.clobber()
 
@@ -1554,7 +1646,9 @@ def test_ComputeEnvironment(pars):
         # config and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert name not in config.options('compute-environments')
+        assert name not in config.options(
+            'compute-environments ' + ck.aws.get_region()
+        )
 
         # Try to retrieve a compute environment that does not exist
         nonexistent_arn = arn.replace(
@@ -1639,7 +1733,9 @@ def test_ComputeEnvironment(pars):
         # Confirm that they exist in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert ce.name in config.options('compute-environments')
+        assert ce.name in config.options(
+            'compute-environments ' + ck.aws.get_region()
+        )
 
         # Clobber compute environment
         ce.clobber()
@@ -1658,7 +1754,9 @@ def test_ComputeEnvironment(pars):
         # config and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert ce.name not in config.options('compute-environments')
+        assert ce.name not in config.options(
+            'compute-environments ' + ck.aws.get_region()
+        )
 
         # Test for correct handling of incorrect input
         # ValueError for neither arn or name
@@ -1918,7 +2016,8 @@ def test_ComputeEnvironment(pars):
 
             # Clean up config file
             try:
-                config.remove_option('job-queues', name)
+                config.remove_option('job-queues ' + ck.aws.get_region(),
+                                     name)
             except configparser.NoSectionError:
                 pass
 
@@ -1937,7 +2036,9 @@ def test_ComputeEnvironment(pars):
 
             # Clean up config file
             try:
-                config.remove_option('compute-environments', ce['name'])
+                config.remove_option(
+                    'compute-environments ' + ck.aws.get_region(), ce['name']
+                )
             except configparser.NoSectionError:
                 pass
 
@@ -2020,7 +2121,7 @@ def test_JobQueue(pars):
         # Confirm that the job queue is in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert name in config.options('job-queues')
+        assert name in config.options('job-queues ' + ck.aws.get_region())
 
         # Assert ValueError on invalid status in get_jobs() method
         with pytest.raises(ValueError):
@@ -2029,9 +2130,28 @@ def test_JobQueue(pars):
         assert jq.get_jobs() == []
         assert jq.get_jobs(status='STARTING') == []
 
+        # Assert that clobber raises RegionException if we change the region
+        old_region = ck.get_region()
+        if old_region != 'us-east-2':
+            ck.set_region(region='us-east-2')
+        else:
+            ck.set_region(region='us-east-1')
+
+        with pytest.raises(ck.aws.RegionException):
+            jq.clobber()
+
+        with pytest.raises(ck.aws.RegionException):
+            jobs = jq.get_jobs()  # noqa: F841
+
+        ck.set_region(region=old_region)
+
         # Clobber the job queue
         jq.clobber()
 
+        # Assert that we can no longer get jobs after clobbering
+        with pytest.raises(ck.aws.ResourceClobberedException):
+            jobs = jq.get_jobs()  # noqa: F841
+
         # Assert that it was removed from AWS
         response = batch.describe_job_queues(jobQueues=[arn])
         response_jq = response.get('jobQueues')
@@ -2044,7 +2164,7 @@ def test_JobQueue(pars):
         # config and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert name not in config.options('job-queues')
+        assert name not in config.options('job-queues ' + ck.aws.get_region())
 
         # Try to retrieve a job queue that does not exist
         nonexistent_arn = arn.replace(
@@ -2084,7 +2204,9 @@ def test_JobQueue(pars):
         # Confirm that they exist in the config file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert jq.name in config.options('job-queues')
+        assert jq.name in config.options(
+            'job-queues ' + ck.aws.get_region()
+        )
 
         # Clobber job queue
         jq.clobber()
@@ -2103,7 +2225,9 @@ def test_JobQueue(pars):
         # config and then re-read the file
         config = configparser.ConfigParser()
         config.read(config_file)
-        assert jq.name not in config.options('job-queues')
+        assert jq.name not in config.options(
+            'job-queues ' + ck.aws.get_region()
+        )
 
         ce.clobber()
         ce2.clobber()
@@ -2210,7 +2334,8 @@ def test_JobQueue(pars):
 
             # Clean up config file
             try:
-                config.remove_option('job-queues', name)
+                config.remove_option('job-queues ' + ck.aws.get_region(),
+                                     name)
             except configparser.NoSectionError:
                 pass
 
@@ -2229,7 +2354,9 @@ def test_JobQueue(pars):
 
             # Clean up config file
             try:
-                config.remove_option('compute-environments', ce['name'])
+                config.remove_option(
+                    'compute-environments ' + ck.aws.get_region(), ce['name']
+                )
             except configparser.NoSectionError:
                 pass
 
@@ -2290,7 +2417,8 @@ def test_JobQueue(pars):
 
             # Clean up config file
             try:
-                config.remove_option('job-queues', jq['name'])
+                config.remove_option('job-queues ' + ck.aws.get_region(),
+                                     jq['name'])
             except configparser.NoSectionError:
                 pass
diff --git a/cloudknot/tests/test_cloudknot.py b/cloudknot/tests/test_cloudknot.py
index 0155c37f..911213b5 100644
--- a/cloudknot/tests/test_cloudknot.py
+++ b/cloudknot/tests/test_cloudknot.py
@@ -99,7 +99,7 @@ def cleanup():
 
     # Clean up config file
     try:
-        config.remove_option('job-queues', name)
+        config.remove_option('job-queues ' + ck.aws.get_region(), name)
     except configparser.NoSectionError:
         pass
 
@@ -118,7 +118,8 @@ def cleanup():
 
     # Clean up config file
    try:
-        config.remove_option('compute-environments', ce['name'])
+        config.remove_option('compute-environments ' + ck.aws.get_region(),
+                             ce['name'])
     except configparser.NoSectionError:
         pass
 
@@ -181,7 +182,8 @@ def cleanup():
 
     # Clean up config file
     try:
-        config.remove_option('job-queues', jq['name'])
+        config.remove_option('job-queues ' + ck.aws.get_region(),
+                             jq['name'])
     except configparser.NoSectionError:
         pass
 
@@ -225,7 +227,8 @@ def cleanup():
 
     # Clean up config file
     try:
-        config.remove_option('job-definitions', jd['name'])
+        config.remove_option('job-definitions ' + ck.aws.get_region(),
+                             jd['name'])
     except configparser.NoSectionError:
         pass
 
@@ -254,7 +257,8 @@ def cleanup():
 
     # Clean up config file
     try:
-        config.remove_option('security-groups', sg['id'])
+        config.remove_option('security-groups ' + ck.aws.get_region(),
+                             sg['id'])
     except configparser.NoSectionError:
         pass
 
@@ -299,7 +303,8 @@ def cleanup():
 
     # Clean up config file
     try:
-        config.remove_option('vpc', vpc['VpcId'])
+        config.remove_option('vpc ' + ck.aws.get_region(),
+                             vpc['VpcId'])
     except configparser.NoSectionError:
         pass
diff --git a/cloudknot/tests/test_dockerimage.py b/cloudknot/tests/test_dockerimage.py
index 64540d0c..5131f499 100644
--- a/cloudknot/tests/test_dockerimage.py
+++ b/cloudknot/tests/test_dockerimage.py
@@ -374,6 +374,14 @@ def test_DockerImage():
 
         di.clobber()
 
+        # Assert error on build after clobber
+        with pytest.raises(ck.aws.ResourceClobberedException):
+            di.build(tags=['testing'])
+
+        # Assert error on push after clobber
+        with pytest.raises(ck.aws.ResourceClobberedException):
+            di.push(repo=repo)
+
         repo.clobber()
 
     except Exception as e:
         response = ecr.describe_repositories()
@@ -413,9 +421,10 @@ def test_DockerImage():
             config.remove_section(name)
 
         try:
-            for option in config.options('docker-repos'):
+            section_name = 'docker-repos ' + ck.aws.get_region()
+            for option in config.options(section_name):
                 if UNIT_TEST_PREFIX in option:
-                    config.remove_option('docker-repos', option)
+                    config.remove_option(section_name, option)
         except configparser.NoSectionError:
             pass
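The tenacity retry blocks added across these tests all follow one idiom: retry_unless_exception_type keeps re-issuing the describe call while it still succeeds (i.e., while AWS's eventually consistent API still reports the deleted resource), and treats the eventual ClientError as the success condition, bounded by stop_after_delay. A condensed, self-contained sketch of that idiom (the boto3 client and the VPC ID are illustrative):

    import boto3
    import pytest
    import tenacity

    ec2 = boto3.client('ec2')

    retry = tenacity.Retrying(
        wait=tenacity.wait_exponential(max=64),   # back off, capped at 64 s
        stop=tenacity.stop_after_delay(120),      # give up after 120 s total
        retry=tenacity.retry_unless_exception_type(
            ec2.exceptions.ClientError            # retry *until* AWS errors
        )
    )

    # The describe call keeps succeeding while the deleted VPC lingers in
    # AWS's view; once deletion propagates it raises ClientError
    # (InvalidVpcID.NotFound), which tenacity re-raises and pytest catches.
    with pytest.raises(ec2.exceptions.ClientError):
        retry.call(ec2.describe_vpcs, VpcIds=['vpc-0123456789abcdef0'])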