diff --git a/changelogs/fragments/1784-black.yml b/changelogs/fragments/1784-black.yml new file mode 100644 index 00000000000..981396488ef --- /dev/null +++ b/changelogs/fragments/1784-black.yml @@ -0,0 +1,2 @@ +minor_changes: +- The ``black`` code formatter has been run across the collection to improve code consistency (https://github.com/ansible-collections/community.aws/pull/1784). diff --git a/plugins/module_utils/base.py b/plugins/module_utils/base.py index 48e3132540c..6a549e47330 100644 --- a/plugins/module_utils/base.py +++ b/plugins/module_utils/base.py @@ -22,7 +22,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -class BaseWaiterFactory(): +class BaseWaiterFactory: """ A helper class used for creating additional waiters. Unlike the waiters available directly from botocore these waiters will @@ -39,6 +39,7 @@ class BaseWaiterFactory(): waiter = waiters.get_waiter('my_waiter_name') waiter.wait(**params) """ + module = None client = None @@ -113,9 +114,14 @@ def _waiter_model_data(self): def _inject_ratelimit_retries(self, model): extra_retries = [ - 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable', - 'InternalFailure', 'InternalError', 'TooManyRequestsException', - 'Throttling'] + "RequestLimitExceeded", + "Unavailable", + "ServiceUnavailable", + "InternalFailure", + "InternalError", + "TooManyRequestsException", + "Throttling", + ] acceptors = [] for error in extra_retries: @@ -130,15 +136,15 @@ def _inject_ratelimit_retries(self, model): def get_waiter(self, waiter_name): waiters = self._model.waiter_names if waiter_name not in waiters: - self.module.fail_json( - 'Unable to find waiter {0}. Available_waiters: {1}' - .format(waiter_name, waiters)) + self.module.fail_json("Unable to find waiter {0}. Available_waiters: {1}".format(waiter_name, waiters)) return botocore.waiter.create_waiter_with_client( - waiter_name, self._model, self.client, + waiter_name, + self._model, + self.client, ) -class Boto3Mixin(): +class Boto3Mixin: @staticmethod def aws_error_handler(description): r""" @@ -176,11 +182,15 @@ def handler(_self, *args, **kwargs): extra_ouput = _self._extra_error_output() try: return func(_self, *args, **kwargs) - except (botocore.exceptions.WaiterError) as e: - _self.module.fail_json_aws(e, msg='Failed waiting for {DESC}'.format(DESC=description), **extra_ouput) + except botocore.exceptions.WaiterError as e: + _self.module.fail_json_aws( + e, msg="Failed waiting for {DESC}".format(DESC=description), **extra_ouput + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - _self.module.fail_json_aws(e, msg='Failed to {DESC}'.format(DESC=description), **extra_ouput) + _self.module.fail_json_aws(e, msg="Failed to {DESC}".format(DESC=description), **extra_ouput) + return handler + return wrapper def _normalize_boto3_resource(self, resource, add_tags=False): @@ -198,7 +208,7 @@ def _normalize_boto3_resource(self, resource, add_tags=False): if resource is None: return None - tags = resource.get('Tags', None) + tags = resource.get("Tags", None) if tags: tags = boto3_tag_list_to_ansible_dict(tags) elif add_tags or tags is not None: @@ -206,7 +216,7 @@ def _normalize_boto3_resource(self, resource, add_tags=False): normalized_resource = camel_dict_to_snake_dict(resource) if tags is not None: - normalized_resource['tags'] = tags + normalized_resource["tags"] = tags return normalized_resource def _extra_error_output(self): @@ -261,9 +271,9 @@ def _waiter_config(self): params = dict() if self._wait_timeout: delay = min(5, self._wait_timeout) - max_attempts = (self._wait_timeout // delay) + max_attempts = self._wait_timeout // delay config = dict(Delay=delay, MaxAttempts=max_attempts) - params['WaiterConfig'] = config + params["WaiterConfig"] = config return params def _wait_for_deletion(self): @@ -346,8 +356,7 @@ def _set_resource_value(self, key, value, description=None, immutable=False): if immutable and self.original_resource: if description is None: description = key - self.module.fail_json(msg='{0} can not be updated after creation' - .format(description)) + self.module.fail_json(msg="{0} can not be updated after creation".format(description)) self._resource_updates[key] = value self.changed = True return True diff --git a/plugins/module_utils/ec2.py b/plugins/module_utils/ec2.py index 2874e278d9c..59b617f20f0 100644 --- a/plugins/module_utils/ec2.py +++ b/plugins/module_utils/ec2.py @@ -22,7 +22,7 @@ class Ec2WaiterFactory(BaseWaiterFactory): def __init__(self, module): # the AWSRetry wrapper doesn't support the wait functions (there's no # public call we can cleanly wrap) - client = module.client('ec2') + client = module.client("ec2") super(Ec2WaiterFactory, self).__init__(module, client) @property @@ -32,30 +32,28 @@ def _waiter_model_data(self): class Ec2Boto3Mixin(Boto3Mixin): - @AWSRetry.jittered_backoff() def _paginated_describe_subnets(self, **params): - paginator = self.client.get_paginator('describe_subnets') + paginator = self.client.get_paginator("describe_subnets") return paginator.paginate(**params).build_full_result() - @Boto3Mixin.aws_error_handler('describe subnets') + @Boto3Mixin.aws_error_handler("describe subnets") def _describe_subnets(self, **params): try: result = self._paginated_describe_subnets(**params) - except is_boto3_error_code('SubnetID.NotFound'): + except is_boto3_error_code("SubnetID.NotFound"): return None - return result.get('Subnets', None) + return result.get("Subnets", None) class BaseEc2Manager(Ec2Boto3Mixin, BaseResourceManager): - resource_id = None TAG_RESOURCE_TYPE = None # This can be overridden by a subclass *if* 'Tags' isn't returned as a part of # the standard Resource description TAGS_ON_RESOURCE = True # If the resource supports using "TagSpecifications" on creation we can - TAGS_ON_CREATE = 'TagSpecifications' + TAGS_ON_CREATE = "TagSpecifications" def __init__(self, module, id=None): r""" @@ -78,27 +76,27 @@ def _flush_update(self): changed |= super(BaseEc2Manager, self)._flush_update() return changed - @Boto3Mixin.aws_error_handler('connect to AWS') - def _create_client(self, client_name='ec2'): + @Boto3Mixin.aws_error_handler("connect to AWS") + def _create_client(self, client_name="ec2"): client = self.module.client(client_name, retry_decorator=AWSRetry.jittered_backoff()) return client - @Boto3Mixin.aws_error_handler('set tags on resource') + @Boto3Mixin.aws_error_handler("set tags on resource") def _add_tags(self, **params): self.client.create_tags(aws_retry=True, **params) return True - @Boto3Mixin.aws_error_handler('unset tags on resource') + @Boto3Mixin.aws_error_handler("unset tags on resource") def _remove_tags(self, **params): self.client.delete_tags(aws_retry=True, **params) return True @AWSRetry.jittered_backoff() def _paginated_describe_tags(self, **params): - paginator = self.client.get_paginator('describe_tags') + paginator = self.client.get_paginator("describe_tags") return paginator.paginate(**params).build_full_result() - @Boto3Mixin.aws_error_handler('list tags on resource') + @Boto3Mixin.aws_error_handler("list tags on resource") def _describe_tags(self, id=None): if not id: id = self.resource_id @@ -111,7 +109,7 @@ def _get_tags(self, id=None): id = self.resource_id # If the Tags are available from the resource, then use them if self.TAGS_ON_RESOURCE: - tags = self._preupdate_resource.get('Tags', []) + tags = self._preupdate_resource.get("Tags", []) # Otherwise we'll have to look them up else: tags = self._describe_tags(id=id) @@ -119,8 +117,8 @@ def _get_tags(self, id=None): def _do_tagging(self): changed = False - tags_to_add = self._tagging_updates.get('add') - tags_to_remove = self._tagging_updates.get('remove') + tags_to_add = self._tagging_updates.get("add") + tags_to_remove = self._tagging_updates.get("remove") if tags_to_add: changed = True @@ -136,25 +134,22 @@ def _do_tagging(self): return changed def _merge_resource_changes(self, filter_immutable=True, creation=False): - resource = super(BaseEc2Manager, self)._merge_resource_changes( - filter_immutable=filter_immutable, - creation=creation + filter_immutable=filter_immutable, creation=creation ) if creation: if not self.TAGS_ON_CREATE: - resource.pop('Tags', None) - elif self.TAGS_ON_CREATE == 'TagSpecifications': - tags = boto3_tag_list_to_ansible_dict(resource.pop('Tags', [])) + resource.pop("Tags", None) + elif self.TAGS_ON_CREATE == "TagSpecifications": + tags = boto3_tag_list_to_ansible_dict(resource.pop("Tags", [])) tag_specs = boto3_tag_specifications(tags, types=[self.TAG_RESOURCE_TYPE]) if tag_specs: - resource['TagSpecifications'] = tag_specs + resource["TagSpecifications"] = tag_specs return resource def set_tags(self, tags, purge_tags): - if tags is None: return False changed = False @@ -173,16 +168,16 @@ def set_tags(self, tags, purge_tags): tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags) if tags_to_add: - self._tagging_updates['add'] = tags_to_add + self._tagging_updates["add"] = tags_to_add changed = True if tags_to_remove: - self._tagging_updates['remove'] = tags_to_remove + self._tagging_updates["remove"] = tags_to_remove changed = True if changed: # Tags are a stored as a list, but treated like a list, the # simplisic '==' in _set_resource_value doesn't do the comparison # properly - return self._set_resource_value('Tags', ansible_dict_to_boto3_tag_list(desired_tags)) + return self._set_resource_value("Tags", ansible_dict_to_boto3_tag_list(desired_tags)) return False diff --git a/plugins/module_utils/etag.py b/plugins/module_utils/etag.py index 072a6514cc9..978111ba63a 100644 --- a/plugins/module_utils/etag.py +++ b/plugins/module_utils/etag.py @@ -24,6 +24,7 @@ try: from boto3.s3.transfer import TransferConfig + DEFAULT_CHUNK_SIZE = TransferConfig().multipart_chunksize except ImportError: DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024 @@ -42,14 +43,13 @@ def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE): md5s = [] - with open(source_path, 'rb') as fp: + with open(source_path, "rb") as fp: while True: - data = fp.read(chunk_size) if not data: break - md5 = hashlib.new('md5', usedforsecurity=False) + md5 = hashlib.new("md5", usedforsecurity=False) md5.update(data) md5s.append(md5) diff --git a/plugins/module_utils/modules.py b/plugins/module_utils/modules.py index 1ff78758a9b..88f645c8695 100644 --- a/plugins/module_utils/modules.py +++ b/plugins/module_utils/modules.py @@ -10,9 +10,7 @@ class AnsibleCommunityAWSModule(AnsibleAWSModule): - def __init__(self, **kwargs): - super(AnsibleCommunityAWSModule, self).__init__(**kwargs) set_collection_info( collection_name=COMMUNITY_AWS_COLLECTION_NAME, diff --git a/plugins/module_utils/networkfirewall.py b/plugins/module_utils/networkfirewall.py index e4ffa91d814..cd4872907c6 100644 --- a/plugins/module_utils/networkfirewall.py +++ b/plugins/module_utils/networkfirewall.py @@ -62,7 +62,7 @@ class NetworkFirewallWaiterFactory(BaseWaiterFactory): def __init__(self, module): # the AWSRetry wrapper doesn't support the wait functions (there's no # public call we can cleanly wrap) - client = module.client('network-firewall') + client = module.client("network-firewall") super(NetworkFirewallWaiterFactory, self).__init__(module, client) @property @@ -70,63 +70,104 @@ def _waiter_model_data(self): data = super(NetworkFirewallWaiterFactory, self)._waiter_model_data nw_data = dict( rule_group_active=dict( - operation='DescribeRuleGroup', - delay=5, maxAttempts=120, + operation="DescribeRuleGroup", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='failure', matcher='path', expected='DELETING', argument='RuleGroupResponse.RuleGroupStatus'), - dict(state='success', matcher='path', expected='ACTIVE', argument='RuleGroupResponse.RuleGroupStatus'), - ] + dict( + state="failure", + matcher="path", + expected="DELETING", + argument="RuleGroupResponse.RuleGroupStatus", + ), + dict( + state="success", matcher="path", expected="ACTIVE", argument="RuleGroupResponse.RuleGroupStatus" + ), + ], ), rule_group_deleted=dict( - operation='DescribeRuleGroup', - delay=5, maxAttempts=120, + operation="DescribeRuleGroup", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='retry', matcher='path', expected='DELETING', argument='RuleGroupResponse.RuleGroupStatus'), - dict(state='success', matcher='error', expected='ResourceNotFoundException'), - ] + dict( + state="retry", matcher="path", expected="DELETING", argument="RuleGroupResponse.RuleGroupStatus" + ), + dict(state="success", matcher="error", expected="ResourceNotFoundException"), + ], ), policy_active=dict( - operation='DescribeFirewallPolicy', - delay=5, maxAttempts=120, + operation="DescribeFirewallPolicy", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='failure', matcher='path', expected='DELETING', argument='FirewallPolicyResponse.FirewallPolicyStatus'), - dict(state='success', matcher='path', expected='ACTIVE', argument='FirewallPolicyResponse.FirewallPolicyStatus'), - ] + dict( + state="failure", + matcher="path", + expected="DELETING", + argument="FirewallPolicyResponse.FirewallPolicyStatus", + ), + dict( + state="success", + matcher="path", + expected="ACTIVE", + argument="FirewallPolicyResponse.FirewallPolicyStatus", + ), + ], ), policy_deleted=dict( - operation='DescribeFirewallPolicy', - delay=5, maxAttempts=120, + operation="DescribeFirewallPolicy", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='retry', matcher='path', expected='DELETING', argument='FirewallPolicyResponse.FirewallPolicyStatus'), - dict(state='success', matcher='error', expected='ResourceNotFoundException'), - ] + dict( + state="retry", + matcher="path", + expected="DELETING", + argument="FirewallPolicyResponse.FirewallPolicyStatus", + ), + dict(state="success", matcher="error", expected="ResourceNotFoundException"), + ], ), firewall_active=dict( - operation='DescribeFirewall', - delay=5, maxAttempts=120, + operation="DescribeFirewall", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='failure', matcher='path', expected='DELETING', argument='FirewallStatus.Status'), - dict(state='retry', matcher='path', expected='PROVISIONING', argument='FirewallStatus.Status'), - dict(state='success', matcher='path', expected='READY', argument='FirewallStatus.Status'), - ] + dict(state="failure", matcher="path", expected="DELETING", argument="FirewallStatus.Status"), + dict(state="retry", matcher="path", expected="PROVISIONING", argument="FirewallStatus.Status"), + dict(state="success", matcher="path", expected="READY", argument="FirewallStatus.Status"), + ], ), firewall_updated=dict( - operation='DescribeFirewall', - delay=5, maxAttempts=240, + operation="DescribeFirewall", + delay=5, + maxAttempts=240, acceptors=[ - dict(state='failure', matcher='path', expected='DELETING', argument='FirewallStatus.Status'), - dict(state='retry', matcher='path', expected='PROVISIONING', argument='FirewallStatus.Status'), - dict(state='retry', matcher='path', expected='PENDING', argument='FirewallStatus.ConfigurationSyncStateSummary'), - dict(state='success', matcher='path', expected='IN_SYNC', argument='FirewallStatus.ConfigurationSyncStateSummary'), - ] + dict(state="failure", matcher="path", expected="DELETING", argument="FirewallStatus.Status"), + dict(state="retry", matcher="path", expected="PROVISIONING", argument="FirewallStatus.Status"), + dict( + state="retry", + matcher="path", + expected="PENDING", + argument="FirewallStatus.ConfigurationSyncStateSummary", + ), + dict( + state="success", + matcher="path", + expected="IN_SYNC", + argument="FirewallStatus.ConfigurationSyncStateSummary", + ), + ], ), firewall_deleted=dict( - operation='DescribeFirewall', - delay=5, maxAttempts=240, + operation="DescribeFirewall", + delay=5, + maxAttempts=240, acceptors=[ - dict(state='retry', matcher='path', expected='DELETING', argument='FirewallStatus.Status'), - dict(state='success', matcher='error', expected='ResourceNotFoundException'), - ] + dict(state="retry", matcher="path", expected="DELETING", argument="FirewallStatus.Status"), + dict(state="success", matcher="error", expected="ResourceNotFoundException"), + ], ), ) data.update(nw_data) @@ -149,65 +190,65 @@ class NFRuleGroupBoto3Mixin(NetworkFirewallBoto3Mixin): # retry - retries the full fetch, but better than simply giving up. @AWSRetry.jittered_backoff() def _paginated_list_rule_groups(self, **params): - paginator = self.client.get_paginator('list_rule_groups') + paginator = self.client.get_paginator("list_rule_groups") result = paginator.paginate(**params).build_full_result() - return result.get('RuleGroups', None) + return result.get("RuleGroups", None) - @Boto3Mixin.aws_error_handler('list all rule groups') + @Boto3Mixin.aws_error_handler("list all rule groups") def _list_rule_groups(self, **params): return self._paginated_list_rule_groups(**params) - @Boto3Mixin.aws_error_handler('describe rule group') + @Boto3Mixin.aws_error_handler("describe rule group") def _describe_rule_group(self, **params): try: result = self.client.describe_rule_group(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - rule_group = result.get('RuleGroup', None) - metadata = result.get('RuleGroupResponse', None) + rule_group = result.get("RuleGroup", None) + metadata = result.get("RuleGroupResponse", None) return dict(RuleGroup=rule_group, RuleGroupMetadata=metadata) - @Boto3Mixin.aws_error_handler('create rule group') + @Boto3Mixin.aws_error_handler("create rule group") def _create_rule_group(self, **params): result = self.client.create_rule_group(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('RuleGroupResponse', None) + return result.get("RuleGroupResponse", None) - @Boto3Mixin.aws_error_handler('update rule group') + @Boto3Mixin.aws_error_handler("update rule group") def _update_rule_group(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.update_rule_group(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('RuleGroupResponse', None) + return result.get("RuleGroupResponse", None) - @Boto3Mixin.aws_error_handler('delete rule group') + @Boto3Mixin.aws_error_handler("delete rule group") def _delete_rule_group(self, **params): try: result = self.client.delete_rule_group(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - return result.get('RuleGroupResponse', None) + return result.get("RuleGroupResponse", None) - @Boto3Mixin.aws_error_handler('firewall rule to finish deleting') + @Boto3Mixin.aws_error_handler("firewall rule to finish deleting") def _wait_rule_group_deleted(self, **params): - waiter = self.nf_waiter_factory.get_waiter('rule_group_deleted') + waiter = self.nf_waiter_factory.get_waiter("rule_group_deleted") waiter.wait(**params) - @Boto3Mixin.aws_error_handler('firewall rule to become active') + @Boto3Mixin.aws_error_handler("firewall rule to become active") def _wait_rule_group_active(self, **params): - waiter = self.nf_waiter_factory.get_waiter('rule_group_active') + waiter = self.nf_waiter_factory.get_waiter("rule_group_active") waiter.wait(**params) @@ -216,65 +257,65 @@ class NFPolicyBoto3Mixin(NetworkFirewallBoto3Mixin): # retry - retries the full fetch, but better than simply giving up. @AWSRetry.jittered_backoff() def _paginated_list_policies(self, **params): - paginator = self.client.get_paginator('list_firewall_policies') + paginator = self.client.get_paginator("list_firewall_policies") result = paginator.paginate(**params).build_full_result() - return result.get('FirewallPolicies', None) + return result.get("FirewallPolicies", None) - @Boto3Mixin.aws_error_handler('list all firewall policies') + @Boto3Mixin.aws_error_handler("list all firewall policies") def _list_policies(self, **params): return self._paginated_list_policies(**params) - @Boto3Mixin.aws_error_handler('describe firewall policy') + @Boto3Mixin.aws_error_handler("describe firewall policy") def _describe_policy(self, **params): try: result = self.client.describe_firewall_policy(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - policy = result.get('FirewallPolicy', None) - metadata = result.get('FirewallPolicyResponse', None) + policy = result.get("FirewallPolicy", None) + metadata = result.get("FirewallPolicyResponse", None) return dict(FirewallPolicy=policy, FirewallPolicyMetadata=metadata) - @Boto3Mixin.aws_error_handler('create firewall policy') + @Boto3Mixin.aws_error_handler("create firewall policy") def _create_policy(self, **params): result = self.client.create_firewall_policy(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallPolicyResponse', None) + return result.get("FirewallPolicyResponse", None) - @Boto3Mixin.aws_error_handler('update firewall policy') + @Boto3Mixin.aws_error_handler("update firewall policy") def _update_policy(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.update_firewall_policy(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallPolicyResponse', None) + return result.get("FirewallPolicyResponse", None) - @Boto3Mixin.aws_error_handler('delete firewall policy') + @Boto3Mixin.aws_error_handler("delete firewall policy") def _delete_policy(self, **params): try: result = self.client.delete_firewall_policy(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - return result.get('FirewallPolicyResponse', None) + return result.get("FirewallPolicyResponse", None) - @Boto3Mixin.aws_error_handler('firewall policy to finish deleting') + @Boto3Mixin.aws_error_handler("firewall policy to finish deleting") def _wait_policy_deleted(self, **params): - waiter = self.nf_waiter_factory.get_waiter('policy_deleted') + waiter = self.nf_waiter_factory.get_waiter("policy_deleted") waiter.wait(**params) - @Boto3Mixin.aws_error_handler('firewall policy to become active') + @Boto3Mixin.aws_error_handler("firewall policy to become active") def _wait_policy_active(self, **params): - waiter = self.nf_waiter_factory.get_waiter('policy_active') + waiter = self.nf_waiter_factory.get_waiter("policy_active") waiter.wait(**params) @@ -283,136 +324,136 @@ class NFFirewallBoto3Mixin(NetworkFirewallBoto3Mixin): # retry - retries the full fetch, but better than simply giving up. @AWSRetry.jittered_backoff() def _paginated_list_firewalls(self, **params): - paginator = self.client.get_paginator('list_firewalls') + paginator = self.client.get_paginator("list_firewalls") result = paginator.paginate(**params).build_full_result() - return result.get('Firewalls', None) + return result.get("Firewalls", None) - @Boto3Mixin.aws_error_handler('list all firewalls') + @Boto3Mixin.aws_error_handler("list all firewalls") def _list_firewalls(self, **params): return self._paginated_list_firewalls(**params) - @Boto3Mixin.aws_error_handler('describe firewall') + @Boto3Mixin.aws_error_handler("describe firewall") def _describe_firewall(self, **params): try: result = self.client.describe_firewall(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - firewall = result.get('Firewall', None) - metadata = result.get('FirewallStatus', None) + firewall = result.get("Firewall", None) + metadata = result.get("FirewallStatus", None) return dict(Firewall=firewall, FirewallMetadata=metadata) - @Boto3Mixin.aws_error_handler('create firewall') + @Boto3Mixin.aws_error_handler("create firewall") def _create_firewall(self, **params): result = self.client.create_firewall(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallStatus', None) + return result.get("FirewallStatus", None) - @Boto3Mixin.aws_error_handler('update firewall description') + @Boto3Mixin.aws_error_handler("update firewall description") def _update_firewall_description(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.update_firewall_description(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallName', None) + return result.get("FirewallName", None) - @Boto3Mixin.aws_error_handler('update firewall subnet change protection') + @Boto3Mixin.aws_error_handler("update firewall subnet change protection") def _update_subnet_change_protection(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.update_subnet_change_protection(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallName', None) + return result.get("FirewallName", None) - @Boto3Mixin.aws_error_handler('update firewall policy change protection') + @Boto3Mixin.aws_error_handler("update firewall policy change protection") def _update_firewall_policy_change_protection(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.update_firewall_policy_change_protection(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallName', None) + return result.get("FirewallName", None) - @Boto3Mixin.aws_error_handler('update firewall deletion protection') + @Boto3Mixin.aws_error_handler("update firewall deletion protection") def _update_firewall_delete_protection(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.update_firewall_delete_protection(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallName', None) + return result.get("FirewallName", None) - @Boto3Mixin.aws_error_handler('associate policy with firewall') + @Boto3Mixin.aws_error_handler("associate policy with firewall") def _associate_firewall_policy(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.associate_firewall_policy(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallName', None) + return result.get("FirewallName", None) - @Boto3Mixin.aws_error_handler('associate subnets with firewall') + @Boto3Mixin.aws_error_handler("associate subnets with firewall") def _associate_subnets(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.associate_subnets(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallName', None) + return result.get("FirewallName", None) - @Boto3Mixin.aws_error_handler('disassociate subnets from firewall') + @Boto3Mixin.aws_error_handler("disassociate subnets from firewall") def _disassociate_subnets(self, **params): - if self._update_token and 'UpdateToken' not in params: - params['UpdateToken'] = self._update_token + if self._update_token and "UpdateToken" not in params: + params["UpdateToken"] = self._update_token result = self.client.disassociate_subnets(aws_retry=True, **params) - update_token = result.get('UpdateToken', None) + update_token = result.get("UpdateToken", None) if update_token: self._update_token = update_token - return result.get('FirewallName', None) + return result.get("FirewallName", None) - @Boto3Mixin.aws_error_handler('delete firewall') + @Boto3Mixin.aws_error_handler("delete firewall") def _delete_firewall(self, **params): try: result = self.client.delete_firewall(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - return result.get('FirewallStatus', None) + return result.get("FirewallStatus", None) - @Boto3Mixin.aws_error_handler('firewall to finish deleting') + @Boto3Mixin.aws_error_handler("firewall to finish deleting") def _wait_firewall_deleted(self, **params): - waiter = self.nf_waiter_factory.get_waiter('firewall_deleted') + waiter = self.nf_waiter_factory.get_waiter("firewall_deleted") waiter.wait(**params) - @Boto3Mixin.aws_error_handler('firewall to finish updating') + @Boto3Mixin.aws_error_handler("firewall to finish updating") def _wait_firewall_updated(self, **params): - waiter = self.nf_waiter_factory.get_waiter('firewall_updated') + waiter = self.nf_waiter_factory.get_waiter("firewall_updated") waiter.wait(**params) - @Boto3Mixin.aws_error_handler('firewall to become active') + @Boto3Mixin.aws_error_handler("firewall to become active") def _wait_firewall_active(self, **params): - waiter = self.nf_waiter_factory.get_waiter('firewall_active') + waiter = self.nf_waiter_factory.get_waiter("firewall_active") waiter.wait(**params) @@ -432,8 +473,8 @@ def __init__(self, module): self._metadata_updates = dict() self._tagging_updates = dict() - @Boto3Mixin.aws_error_handler('connect to AWS') - def _create_client(self, client_name='network-firewall'): + @Boto3Mixin.aws_error_handler("connect to AWS") + def _create_client(self, client_name="network-firewall"): client = self.module.client(client_name, retry_decorator=AWSRetry.jittered_backoff()) return client @@ -484,18 +525,18 @@ def _flush_update(self): self._metadata_updates = dict() return changed - @BaseResourceManager.aws_error_handler('set tags on resource') + @BaseResourceManager.aws_error_handler("set tags on resource") def _add_tags(self, **params): self.client.tag_resource(aws_retry=True, **params) return True - @BaseResourceManager.aws_error_handler('unset tags on resource') + @BaseResourceManager.aws_error_handler("unset tags on resource") def _remove_tags(self, **params): self.client.untag_resource(aws_retry=True, **params) return True def _get_preupdate_arn(self): - return self._preupdate_metadata.get('Arn') + return self._preupdate_metadata.get("Arn") def _set_metadata_value(self, key, value, description=None, immutable=False): if value is None: @@ -505,8 +546,7 @@ def _set_metadata_value(self, key, value, description=None, immutable=False): if immutable and self.original_resource: if description is None: description = key - self.module.fail_json(msg='{0} can not be updated after creation' - .format(description)) + self.module.fail_json(msg="{0} can not be updated after creation".format(description)) self._metadata_updates[key] = value self.changed = True return True @@ -515,15 +555,15 @@ def _get_metadata_value(self, key, default=None): return self._metadata_updates.get(key, self._preupdate_metadata.get(key, default)) def _set_tag_values(self, desired_tags): - return self._set_metadata_value('Tags', ansible_dict_to_boto3_tag_list(desired_tags)) + return self._set_metadata_value("Tags", ansible_dict_to_boto3_tag_list(desired_tags)) def _get_tag_values(self): - return self._get_metadata_value('Tags', []) + return self._get_metadata_value("Tags", []) def _flush_tagging(self): changed = False - tags_to_add = self._tagging_updates.get('add') - tags_to_remove = self._tagging_updates.get('remove') + tags_to_add = self._tagging_updates.get("add") + tags_to_remove = self._tagging_updates.get("remove") resource_arn = self._get_preupdate_arn() if not resource_arn: @@ -542,7 +582,6 @@ def _flush_tagging(self): return changed def set_tags(self, tags, purge_tags): - if tags is None: return False changed = False @@ -561,10 +600,10 @@ def set_tags(self, tags, purge_tags): tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags) if tags_to_add: - self._tagging_updates['add'] = tags_to_add + self._tagging_updates["add"] = tags_to_add changed = True if tags_to_remove: - self._tagging_updates['remove'] = tags_to_remove + self._tagging_updates["remove"] = tags_to_remove changed = True if changed: @@ -577,9 +616,7 @@ def set_tags(self, tags, purge_tags): class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManager): - - RULE_TYPES = frozenset(['StatelessRulesAndCustomActions', 'StatefulRules', - 'RulesSourceList', 'RulesString']) + RULE_TYPES = frozenset(["StatelessRulesAndCustomActions", "StatefulRules", "RulesSourceList", "RulesString"]) name = None rule_type = None @@ -598,28 +635,28 @@ def __init__(self, module, name=None, rule_type=None, arn=None): def _extra_error_output(self): output = super(NetworkFirewallRuleManager, self)._extra_error_output() if self.name: - output['RuleGroupName'] = self.name + output["RuleGroupName"] = self.name if self.rule_type: - output['Type'] = self.rule_type + output["Type"] = self.rule_type if self.arn: - output['RuleGroupArn'] = self.arn + output["RuleGroupArn"] = self.arn return output def _filter_immutable_metadata_attributes(self, metadata): metadata = super(NetworkFirewallRuleManager, self)._filter_immutable_metadata_attributes(metadata) - metadata.pop('RuleGroupArn', None) - metadata.pop('RuleGroupName', None) - metadata.pop('RuleGroupId', None) - metadata.pop('Type', None) - metadata.pop('Capacity', None) - metadata.pop('RuleGroupStatus', None) - metadata.pop('Tags', None) - metadata.pop('ConsumedCapacity', None) - metadata.pop('NumberOfAssociations', None) + metadata.pop("RuleGroupArn", None) + metadata.pop("RuleGroupName", None) + metadata.pop("RuleGroupId", None) + metadata.pop("Type", None) + metadata.pop("Capacity", None) + metadata.pop("RuleGroupStatus", None) + metadata.pop("Tags", None) + metadata.pop("ConsumedCapacity", None) + metadata.pop("NumberOfAssociations", None) return metadata def _get_preupdate_arn(self): - return self._get_metadata_value('RuleGroupArn') + return self._get_metadata_value("RuleGroupArn") def _get_id_params(self, name=None, rule_type=None, arn=None): if arn: @@ -634,7 +671,7 @@ def _get_id_params(self, name=None, rule_type=None, arn=None): rule_type = rule_type.upper() if not rule_type or not name: # Users should never see this, but let's cover ourself - self.module.fail_json(msg='Rule identifier parameters missing') + self.module.fail_json(msg="Rule identifier parameters missing") return dict(RuleGroupName=name, Type=rule_type) @staticmethod @@ -646,7 +683,6 @@ def _transform_rule_variables(variables): return {k: dict(Definition=_string_list(v)) for (k, v) in variables.items()} def delete(self, name=None, rule_type=None, arn=None): - id_params = self._get_id_params(name=name, rule_type=rule_type, arn=arn) result = self._get_rule_group(**id_params) @@ -656,8 +692,8 @@ def delete(self, name=None, rule_type=None, arn=None): self.updated_resource = dict() # Rule Group is already in the process of being deleted (takes time) - rule_status = self._get_metadata_value('RuleGroupStatus', '').upper() - if rule_status == 'DELETING': + rule_status = self._get_metadata_value("RuleGroupStatus", "").upper() + if rule_status == "DELETING": self._wait_for_deletion() return False @@ -674,37 +710,37 @@ def list(self, scope=None): params = dict() if scope: scope = scope.upper() - params['Scope'] = scope + params["Scope"] = scope rule_groups = self._list_rule_groups(**params) if not rule_groups: return list() - return [r.get('Arn', None) for r in rule_groups] + return [r.get("Arn", None) for r in rule_groups] def _normalize_rule_variable(self, variable): if variable is None: return None - return {k: variable.get(k, dict()).get('Definition', []) for k in variable.keys()} + return {k: variable.get(k, dict()).get("Definition", []) for k in variable.keys()} def _normalize_rule_variables(self, variables): if variables is None: return None result = dict() - ip_sets = self._normalize_rule_variable(variables.get('IPSets', None)) + ip_sets = self._normalize_rule_variable(variables.get("IPSets", None)) if ip_sets: - result['ip_sets'] = ip_sets - port_sets = self._normalize_rule_variable(variables.get('PortSets', None)) + result["ip_sets"] = ip_sets + port_sets = self._normalize_rule_variable(variables.get("PortSets", None)) if port_sets: - result['port_sets'] = port_sets + result["port_sets"] = port_sets return result def _normalize_rule_group(self, rule_group): if rule_group is None: return None - rule_variables = self._normalize_rule_variables(rule_group.get('RuleVariables', None)) + rule_variables = self._normalize_rule_variables(rule_group.get("RuleVariables", None)) rule_group = self._normalize_boto3_resource(rule_group) if rule_variables is not None: - rule_group['rule_variables'] = rule_variables + rule_group["rule_variables"] = rule_variables return rule_group def _normalize_rule_group_metadata(self, rule_group_metadata): @@ -713,20 +749,19 @@ def _normalize_rule_group_metadata(self, rule_group_metadata): def _normalize_rule_group_result(self, result): if result is None: return None - rule_group = self._normalize_rule_group(result.get('RuleGroup', None)) - rule_group_metadata = self._normalize_rule_group_metadata(result.get('RuleGroupMetadata', None)) + rule_group = self._normalize_rule_group(result.get("RuleGroup", None)) + rule_group_metadata = self._normalize_rule_group_metadata(result.get("RuleGroupMetadata", None)) result = camel_dict_to_snake_dict(result) if rule_group: - result['rule_group'] = rule_group + result["rule_group"] = rule_group if rule_group_metadata: - result['rule_group_metadata'] = rule_group_metadata + result["rule_group_metadata"] = rule_group_metadata return result def _normalize_resource(self, resource): return self._normalize_rule_group_result(resource) def get_rule_group(self, name=None, rule_type=None, arn=None): - id_params = self._get_id_params(name=name, rule_type=rule_type, arn=arn) result = self._get_rule_group(**id_params) @@ -737,35 +772,32 @@ def get_rule_group(self, name=None, rule_type=None, arn=None): return rule_group def set_description(self, description): - return self._set_metadata_value('Description', description) + return self._set_metadata_value("Description", description) def set_capacity(self, capacity): - return self._set_metadata_value( - 'Capacity', capacity, - description="Reserved Capacity", immutable=True) + return self._set_metadata_value("Capacity", capacity, description="Reserved Capacity", immutable=True) def _set_rule_option(self, option_name, description, value, immutable=False, default_value=None): if value is None: return False - rule_options = deepcopy(self._get_resource_value('StatefulRuleOptions', dict())) + rule_options = deepcopy(self._get_resource_value("StatefulRuleOptions", dict())) if value == rule_options.get(option_name, default_value): return False if immutable and self.original_resource: - self.module.fail_json(msg='{0} can not be updated after creation' - .format(description)) + self.module.fail_json(msg="{0} can not be updated after creation".format(description)) rule_options[option_name] = value - return self._set_resource_value('StatefulRuleOptions', rule_options) + return self._set_resource_value("StatefulRuleOptions", rule_options) def set_rule_order(self, order): RULE_ORDER_MAP = { - 'default': 'DEFAULT_ACTION_ORDER', - 'strict': 'STRICT_ORDER', + "default": "DEFAULT_ACTION_ORDER", + "strict": "STRICT_ORDER", } value = RULE_ORDER_MAP.get(order) - changed = self._set_rule_option('RuleOrder', 'Rule order', value, True, 'DEFAULT_ACTION_ORDER') + changed = self._set_rule_option("RuleOrder", "Rule order", value, True, "DEFAULT_ACTION_ORDER") self.changed |= changed return changed @@ -775,7 +807,7 @@ def _set_rule_variables(self, set_name, variables, purge): variables = self._transform_rule_variables(variables) - all_variables = deepcopy(self._get_resource_value('RuleVariables', self._empty_rule_variables())) + all_variables = deepcopy(self._get_resource_value("RuleVariables", self._empty_rule_variables())) current_variables = all_variables.get(set_name, dict()) updated_variables = _merge_dict(current_variables, variables, purge) @@ -785,49 +817,50 @@ def _set_rule_variables(self, set_name, variables, purge): all_variables[set_name] = updated_variables - return self._set_resource_value('RuleVariables', all_variables) + return self._set_resource_value("RuleVariables", all_variables) def set_ip_variables(self, variables, purge): - return self._set_rule_variables('IPSets', variables, purge) + return self._set_rule_variables("IPSets", variables, purge) def set_port_variables(self, variables, purge): - return self._set_rule_variables('PortSets', variables, purge) + return self._set_rule_variables("PortSets", variables, purge) def _set_rule_source(self, rule_type, rules): if not rules: return False conflicting_types = self.RULE_TYPES.difference({rule_type}) - rules_source = deepcopy(self._get_resource_value('RulesSource', dict())) + rules_source = deepcopy(self._get_resource_value("RulesSource", dict())) current_keys = set(rules_source.keys()) conflicting_rule_type = conflicting_types.intersection(current_keys) if conflicting_rule_type: - self.module.fail_json('Unable to add {0} rules, {1} rules already set' - .format(rule_type, " and ".join(conflicting_rule_type))) + self.module.fail_json( + "Unable to add {0} rules, {1} rules already set".format(rule_type, " and ".join(conflicting_rule_type)) + ) original_rules = rules_source.get(rule_type, None) if rules == original_rules: return False rules_source[rule_type] = rules - return self._set_resource_value('RulesSource', rules_source) + return self._set_resource_value("RulesSource", rules_source) def set_rule_string(self, rule): if rule is None: return False if not rule: - self.module.fail_json('Rule string must include at least one rule') + self.module.fail_json("Rule string must include at least one rule") rule = "\n".join(_string_list(rule)) - return self._set_rule_source('RulesString', rule) + return self._set_rule_source("RulesString", rule) def set_domain_list(self, options): if not options: return False changed = False - domain_names = options.get('domain_names') - home_net = options.get('source_ips', None) - action = options.get('action') - filter_http = options.get('filter_http', False) - filter_https = options.get('filter_https', False) + domain_names = options.get("domain_names") + home_net = options.get("source_ips", None) + action = options.get("action") + filter_http = options.get("filter_http", False) + filter_https = options.get("filter_https", False) if home_net: # Seems a little kludgy but the HOME_NET ip variable is how you @@ -839,14 +872,14 @@ def set_domain_list(self, options): # Perform some transformations target_types = [] if filter_http: - target_types.append('HTTP_HOST') + target_types.append("HTTP_HOST") if filter_https: - target_types.append('TLS_SNI') + target_types.append("TLS_SNI") - if action == 'allow': - action = 'ALLOWLIST' + if action == "allow": + action = "ALLOWLIST" else: - action = 'DENYLIST' + action = "DENYLIST" # Finally build the 'rule' rule = dict( @@ -854,37 +887,37 @@ def set_domain_list(self, options): TargetTypes=target_types, GeneratedRulesType=action, ) - changed |= self._set_rule_source('RulesSourceList', rule) + changed |= self._set_rule_source("RulesSourceList", rule) return changed def _format_rule_options(self, options, sid): formatted_options = [] - opt = dict(Keyword='sid:{0}'.format(sid)) + opt = dict(Keyword="sid:{0}".format(sid)) formatted_options.append(opt) if options: for option in sorted(options.keys()): opt = dict(Keyword=option) settings = options.get(option) if settings: - opt['Settings'] = _string_list(settings) + opt["Settings"] = _string_list(settings) formatted_options.append(opt) return formatted_options def _format_stateful_rule(self, rule): options = self._format_rule_options( - rule.get('rule_options', dict()), - rule.get('sid'), + rule.get("rule_options", dict()), + rule.get("sid"), ) formatted_rule = dict( - Action=rule.get('action').upper(), + Action=rule.get("action").upper(), RuleOptions=options, Header=dict( - Protocol=rule.get('protocol').upper(), - Source=rule.get('source'), - SourcePort=rule.get('source_port'), - Direction=rule.get('direction').upper(), - Destination=rule.get('destination'), - DestinationPort=rule.get('destination_port'), + Protocol=rule.get("protocol").upper(), + Source=rule.get("source"), + SourcePort=rule.get("source_port"), + Direction=rule.get("direction").upper(), + Destination=rule.get("destination"), + DestinationPort=rule.get("destination_port"), ), ) return formatted_rule @@ -893,40 +926,40 @@ def set_rule_list(self, rules): if rules is None: return False if not rules: - self.module.fail_json(msg='Rule list must include at least one rule') + self.module.fail_json(msg="Rule list must include at least one rule") formatted_rules = [self._format_stateful_rule(r) for r in rules] - return self._set_rule_source('StatefulRules', formatted_rules) + return self._set_rule_source("StatefulRules", formatted_rules) def _do_create_resource(self): metadata, resource = self._merge_changes(filter_metadata=False) params = metadata params.update(self._get_id_params()) - params['RuleGroup'] = resource + params["RuleGroup"] = resource response = self._create_rule_group(**params) return bool(response) def _generate_updated_resource(self): metadata, resource = self._merge_changes(filter_metadata=False) metadata.update(self._get_id_params()) - updated_resource = dict( - RuleGroup=resource, - RuleGroupMetadata=metadata - ) + updated_resource = dict(RuleGroup=resource, RuleGroupMetadata=metadata) return updated_resource def _flush_create(self): # Apply some pre-flight tests before trying to run the creation. - if 'Capacity' not in self._metadata_updates: - self.module.fail_json('Capacity must be provided when creating a new Rule Group') + if "Capacity" not in self._metadata_updates: + self.module.fail_json("Capacity must be provided when creating a new Rule Group") - rules_source = self._get_resource_value('RulesSource', dict()) + rules_source = self._get_resource_value("RulesSource", dict()) rule_type = self.RULE_TYPES.intersection(set(rules_source.keys())) if len(rule_type) != 1: - self.module.fail_json('Exactly one of rule strings, domain list or rule list' - ' must be provided when creating a new rule group', - rule_type=rule_type, keys=self._resource_updates.keys(), - types=self.RULE_TYPES) + self.module.fail_json( + "Exactly one of rule strings, domain list or rule list" + " must be provided when creating a new rule group", + rule_type=rule_type, + keys=self._resource_updates.keys(), + types=self.RULE_TYPES, + ) return super(NetworkFirewallRuleManager, self)._flush_create() @@ -941,7 +974,7 @@ def _do_update_resource(self): params = metadata params.update(self._get_id_params()) - params['RuleGroup'] = resource + params["RuleGroup"] = resource if not self.module.check_mode: response = self._update_rule_group(**params) @@ -959,8 +992,8 @@ def _get_rule_group(self, **params): if not result: return None - rule_group = result.get('RuleGroup', None) - metadata = result.get('RuleGroupMetadata', None) + rule_group = result.get("RuleGroup", None) + metadata = result.get("RuleGroupMetadata", None) self._preupdate_resource = deepcopy(rule_group) self._preupdate_metadata = deepcopy(metadata) return dict(RuleGroup=rule_group, RuleGroupMetadata=metadata) @@ -980,7 +1013,6 @@ def _do_deletion_wait(self, **params): class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, BaseNetworkFirewallManager): - name = None arn = None _group_name_cache = None @@ -997,25 +1029,25 @@ def __init__(self, module, name=None, arn=None): def _extra_error_output(self): output = super(NetworkFirewallPolicyManager, self)._extra_error_output() if self.name: - output['FirewallPolicyName'] = self.name + output["FirewallPolicyName"] = self.name if self.arn: - output['FirewallPolicyArn'] = self.arn + output["FirewallPolicyArn"] = self.arn return output def _filter_immutable_metadata_attributes(self, metadata): metadata = super(NetworkFirewallPolicyManager, self)._filter_immutable_metadata_attributes(metadata) - metadata.pop('FirewallPolicyArn', None) - metadata.pop('FirewallPolicyName', None) - metadata.pop('FirewallPolicyId', None) - metadata.pop('FirewallPolicyStatus', None) - metadata.pop('ConsumedStatelessRuleCapacity', None) - metadata.pop('ConsumedStatefulRuleCapacity', None) - metadata.pop('Tags', None) - metadata.pop('NumberOfAssociations', None) + metadata.pop("FirewallPolicyArn", None) + metadata.pop("FirewallPolicyName", None) + metadata.pop("FirewallPolicyId", None) + metadata.pop("FirewallPolicyStatus", None) + metadata.pop("ConsumedStatelessRuleCapacity", None) + metadata.pop("ConsumedStatefulRuleCapacity", None) + metadata.pop("Tags", None) + metadata.pop("NumberOfAssociations", None) return metadata def _get_preupdate_arn(self): - return self._get_metadata_value('FirewallPolicyArn') + return self._get_metadata_value("FirewallPolicyArn") def _get_id_params(self, name=None, arn=None): if arn: @@ -1027,7 +1059,6 @@ def _get_id_params(self, name=None, arn=None): return dict(FirewallPolicyName=name) def delete(self, name=None, arn=None): - id_params = self._get_id_params(name=name, arn=arn) result = self._get_policy(**id_params) @@ -1037,8 +1068,8 @@ def delete(self, name=None, arn=None): self.updated_resource = dict() # Policy is already in the process of being deleted (takes time) - rule_status = self._get_metadata_value('FirewallPolicyStatus', '').upper() - if rule_status == 'DELETING': + rule_status = self._get_metadata_value("FirewallPolicyStatus", "").upper() + if rule_status == "DELETING": self._wait_for_deletion() return False @@ -1057,7 +1088,7 @@ def list(self): if not policies: return list() - return [p.get('Arn', None) for p in policies] + return [p.get("Arn", None) for p in policies] @property def _rule_group_name_cache(self): @@ -1067,16 +1098,16 @@ def _rule_group_name_cache(self): if not results: return dict() - group_cache = {r.get('Name', None): r.get('Arn', None) for r in results} + group_cache = {r.get("Name", None): r.get("Arn", None) for r in results} self._group_name_cache = group_cache return group_cache @property def _stateful_rule_order(self): - engine_options = self._get_resource_value('StatefulEngineOptions', None) + engine_options = self._get_resource_value("StatefulEngineOptions", None) if not engine_options: - return 'DEFAULT_ACTION_ORDER' - return engine_options.get('RuleOrder', 'DEFAULT_ACTION_ORDER') + return "DEFAULT_ACTION_ORDER" + return engine_options.get("RuleOrder", "DEFAULT_ACTION_ORDER") def _canonicalize_rule_group(self, name, group_type): """Iterates through a mixed list of ARNs and Names converting them to @@ -1084,20 +1115,22 @@ def _canonicalize_rule_group(self, name, group_type): """ arn = None # : is only valid in ARNs - if ':' in name: + if ":" in name: arn = name else: arn = self._rule_group_name_cache.get(name, None) if not arn: - self.module.fail_json('Unable to fetch ARN for rule group', name=name, - group_name_cache=self._rule_group_name_cache) + self.module.fail_json( + "Unable to fetch ARN for rule group", name=name, group_name_cache=self._rule_group_name_cache + ) arn_info = parse_aws_arn(arn) if not arn_info: - self.module.fail_json('Unable to parse ARN for rule group', arn=arn, arn_info=arn_info) - arn_type = arn_info['resource'].split('/')[0] + self.module.fail_json("Unable to parse ARN for rule group", arn=arn, arn_info=arn_info) + arn_type = arn_info["resource"].split("/")[0] if arn_type != group_type: - self.module.fail_json('Rule group not of expected type', name=name, - arn=arn, expected_type=group_type, found_type=arn_type) + self.module.fail_json( + "Rule group not of expected type", name=name, arn=arn, expected_type=group_type, found_type=arn_type + ) return arn @@ -1106,15 +1139,15 @@ def _format_rulegroup_references(self, groups, strict_order): for idx, arn in enumerate(groups): entry = dict(ResourceArn=arn) if strict_order: - entry['Priority'] = idx + 1 + entry["Priority"] = idx + 1 formated_groups.append(entry) return formated_groups def _rulegroup_references_list(self, groups): - return [g.get('ResourceArn') for g in groups] + return [g.get("ResourceArn") for g in groups] def _sorted_rulegroup_references_list(self, groups): - sorted_list = sorted(groups, key=lambda g: g.get('Priority', None)) + sorted_list = sorted(groups, key=lambda g: g.get("Priority", None)) return self._rulegroup_references_list(sorted_list) def _compare_rulegroup_references(self, current_groups, desired_groups, strict_order): @@ -1131,23 +1164,22 @@ def _set_engine_option(self, option_name, description, value, immutable=False, d if value is None: return False - engine_options = deepcopy(self._get_resource_value('StatefulEngineOptions', dict())) + engine_options = deepcopy(self._get_resource_value("StatefulEngineOptions", dict())) if value == engine_options.get(option_name, default_value): return False if immutable and self.original_resource: - self.module.fail_json(msg='{0} can not be updated after creation' - .format(description)) + self.module.fail_json(msg="{0} can not be updated after creation".format(description)) engine_options[option_name] = value - return self._set_resource_value('StatefulEngineOptions', engine_options) + return self._set_resource_value("StatefulEngineOptions", engine_options) def set_stateful_rule_order(self, order): RULE_ORDER_MAP = { - 'default': 'DEFAULT_ACTION_ORDER', - 'strict': 'STRICT_ORDER', + "default": "DEFAULT_ACTION_ORDER", + "strict": "STRICT_ORDER", } value = RULE_ORDER_MAP.get(order) - changed = self._set_engine_option('RuleOrder', 'Rule order', value, True, 'DEFAULT_ACTION_ORDER') + changed = self._set_engine_option("RuleOrder", "Rule order", value, True, "DEFAULT_ACTION_ORDER") self.changed |= changed return changed @@ -1162,14 +1194,11 @@ def _set_rule_groups(self, groups, group_type, parameter_name, strict_order): return self._set_resource_value(parameter_name, formated_groups) def set_stateful_rule_groups(self, groups): - strict_order = self._stateful_rule_order == 'STRICT_ORDER' - return self._set_rule_groups(groups, 'stateful-rulegroup', - 'StatefulRuleGroupReferences', - strict_order) + strict_order = self._stateful_rule_order == "STRICT_ORDER" + return self._set_rule_groups(groups, "stateful-rulegroup", "StatefulRuleGroupReferences", strict_order) def set_stateless_rule_groups(self, groups): - return self._set_rule_groups(groups, 'stateless-rulegroup', - 'StatelessRuleGroupReferences', True) + return self._set_rule_groups(groups, "stateless-rulegroup", "StatelessRuleGroupReferences", True) def set_default_actions(self, key, actions, valid_actions=None): if actions is None: @@ -1178,38 +1207,35 @@ def set_default_actions(self, key, actions, valid_actions=None): invalid_actions = list(set(actions) - set(valid_actions or [])) if valid_actions and invalid_actions: self.module.fail_json( - msg='{0} contains invalid actions'.format(key), - valid_actions=valid_actions, invalid_actions=invalid_actions, - actions=actions) + msg="{0} contains invalid actions".format(key), + valid_actions=valid_actions, + invalid_actions=invalid_actions, + actions=actions, + ) return self._set_resource_value(key, actions) def set_stateful_default_actions(self, actions): if actions is None: return False - if self._stateful_rule_order != 'STRICT_ORDER': - self.module.fail_json(msg='Stateful default actions can only be set when using strict rule order') + if self._stateful_rule_order != "STRICT_ORDER": + self.module.fail_json(msg="Stateful default actions can only be set when using strict rule order") - valid_actions = [ - 'aws:drop_strict', 'aws:drop_established', - 'aws:alert_strict', 'aws:alert_established' - ] - return self.set_default_actions('StatefulDefaultActions', actions, valid_actions) + valid_actions = ["aws:drop_strict", "aws:drop_established", "aws:alert_strict", "aws:alert_established"] + return self.set_default_actions("StatefulDefaultActions", actions, valid_actions) def _set_stateless_default_actions(self, key, actions): - valid_actions = [ - 'aws:pass', 'aws:drop', 'aws:forward_to_sfe' - ] - custom_actions = self._get_resource_value('StatelessCustomActions', dict()) - custom_action_names = [a['ActionName'] for a in custom_actions] + valid_actions = ["aws:pass", "aws:drop", "aws:forward_to_sfe"] + custom_actions = self._get_resource_value("StatelessCustomActions", dict()) + custom_action_names = [a["ActionName"] for a in custom_actions] valid_actions.extend(custom_action_names) return self.set_default_actions(key, actions, valid_actions) def set_stateless_default_actions(self, actions): - return self._set_stateless_default_actions('StatelessDefaultActions', actions) + return self._set_stateless_default_actions("StatelessDefaultActions", actions) def set_stateless_fragment_default_actions(self, actions): - return self._set_stateless_default_actions('StatelessFragmentDefaultActions', actions) + return self._set_stateless_default_actions("StatelessFragmentDefaultActions", actions) def _normalize_policy(self, policy): if policy is None: @@ -1225,20 +1251,19 @@ def _normalize_policy_metadata(self, policy_metadata): def _normalize_policy_result(self, result): if result is None: return None - policy = self._normalize_policy(result.get('FirewallPolicy', None)) - policy_metadata = self._normalize_policy_metadata(result.get('FirewallPolicyMetadata', None)) + policy = self._normalize_policy(result.get("FirewallPolicy", None)) + policy_metadata = self._normalize_policy_metadata(result.get("FirewallPolicyMetadata", None)) result = dict() if policy: - result['policy'] = policy + result["policy"] = policy if policy_metadata: - result['policy_metadata'] = policy_metadata + result["policy_metadata"] = policy_metadata return result def _normalize_resource(self, resource): return self._normalize_policy_result(resource) def get_policy(self, name=None, arn=None): - id_params = self._get_id_params(name=name, arn=arn) result = self._get_policy(**id_params) @@ -1250,21 +1275,21 @@ def get_policy(self, name=None, arn=None): def _format_custom_action(self, action): formatted_action = dict( - ActionName=action['name'], + ActionName=action["name"], ) action_definition = dict() - if 'publish_metric_dimension_value' in action: - values = _string_list(action['publish_metric_dimension_value']) + if "publish_metric_dimension_value" in action: + values = _string_list(action["publish_metric_dimension_value"]) dimensions = [dict(Value=v) for v in values] - action_definition['PublishMetricAction'] = dict( + action_definition["PublishMetricAction"] = dict( Dimensions=dimensions, ) if action_definition: - formatted_action['ActionDefinition'] = action_definition + formatted_action["ActionDefinition"] = action_definition return formatted_action def _custom_action_map(self, actions): - return {a['ActionName']: a['ActionDefinition'] for a in actions} + return {a["ActionName"]: a["ActionDefinition"] for a in actions} def set_custom_stateless_actions(self, actions, purge_actions): if actions is None: @@ -1272,9 +1297,7 @@ def set_custom_stateless_actions(self, actions, purge_actions): new_action_list = [self._format_custom_action(a) for a in actions] new_action_map = self._custom_action_map(new_action_list) - existing_action_map = self._custom_action_map( - self._get_resource_value('StatelessCustomActions', []) - ) + existing_action_map = self._custom_action_map(self._get_resource_value("StatelessCustomActions", [])) if purge_actions: desired_action_map = dict() else: @@ -1285,34 +1308,31 @@ def set_custom_stateless_actions(self, actions, purge_actions): return False action_list = [dict(ActionName=k, ActionDefinition=v) for k, v in desired_action_map.items()] - self._set_resource_value('StatelessCustomActions', action_list) + self._set_resource_value("StatelessCustomActions", action_list) def set_description(self, description): - return self._set_metadata_value('Description', description) + return self._set_metadata_value("Description", description) def _do_create_resource(self): metadata, resource = self._merge_changes(filter_metadata=False) params = metadata params.update(self._get_id_params()) - params['FirewallPolicy'] = resource + params["FirewallPolicy"] = resource response = self._create_policy(**params) return bool(response) def _generate_updated_resource(self): metadata, resource = self._merge_changes(filter_metadata=False) metadata.update(self._get_id_params()) - updated_resource = dict( - FirewallPolicy=resource, - FirewallPolicyMetadata=metadata - ) + updated_resource = dict(FirewallPolicy=resource, FirewallPolicyMetadata=metadata) return updated_resource def _flush_create(self): # Set some defaults - if self._get_resource_value('StatelessDefaultActions', None) is None: - self._set_resource_value('StatelessDefaultActions', ['aws:forward_to_sfe']) - if self._get_resource_value('StatelessFragmentDefaultActions', None) is None: - self._set_resource_value('StatelessFragmentDefaultActions', ['aws:forward_to_sfe']) + if self._get_resource_value("StatelessDefaultActions", None) is None: + self._set_resource_value("StatelessDefaultActions", ["aws:forward_to_sfe"]) + if self._get_resource_value("StatelessFragmentDefaultActions", None) is None: + self._set_resource_value("StatelessFragmentDefaultActions", ["aws:forward_to_sfe"]) return super(NetworkFirewallPolicyManager, self)._flush_create() def _do_update_resource(self): @@ -1326,7 +1346,7 @@ def _do_update_resource(self): params = metadata params.update(self._get_id_params()) - params['FirewallPolicy'] = resource + params["FirewallPolicy"] = resource if not self.module.check_mode: response = self._update_policy(**params) @@ -1344,13 +1364,13 @@ def _get_policy(self, **params): if not result: return None - policy = result.get('FirewallPolicy', None) + policy = result.get("FirewallPolicy", None) # During deletion, there's a phase where this will return Metadata but # no policy if policy is None: policy = dict() - metadata = result.get('FirewallPolicyMetadata', None) + metadata = result.get("FirewallPolicyMetadata", None) self._preupdate_resource = deepcopy(policy) self._preupdate_metadata = deepcopy(metadata) return dict(FirewallPolicy=policy, FirewallPolicyMetadata=metadata) @@ -1370,7 +1390,6 @@ def _do_deletion_wait(self, **params): class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetworkFirewallManager): - name = None arn = None ec2_manager = None @@ -1392,13 +1411,13 @@ def __init__(self, module, name=None, arn=None): def _extra_error_output(self): output = super(NetworkFirewallManager, self)._extra_error_output() if self.name: - output['FirewallName'] = self.name + output["FirewallName"] = self.name if self.arn: - output['FirewallArn'] = self.arn + output["FirewallArn"] = self.arn return output def _get_preupdate_arn(self): - return self._get_resource_value('FirewallArn') + return self._get_resource_value("FirewallArn") def _get_id_params(self, name=None, arn=None): if arn: @@ -1409,11 +1428,10 @@ def _get_id_params(self, name=None, arn=None): name = self.name if not name: # Users should never see this, but let's cover ourself - self.module.fail_json(msg='Firewall identifier parameters missing') + self.module.fail_json(msg="Firewall identifier parameters missing") return dict(FirewallName=name) def delete(self, name=None, arn=None): - id_params = self._get_id_params(name=name, arn=arn) result = self._get_firewall(**id_params) @@ -1423,8 +1441,8 @@ def delete(self, name=None, arn=None): self.updated_resource = dict() # Firewall is already in the process of being deleted (takes time) - firewall_status = self._get_metadata_value('Status', '').upper() - if firewall_status == 'DELETING': + firewall_status = self._get_metadata_value("Status", "").upper() + if firewall_status == "DELETING": self._wait_for_deletion() return False @@ -1432,9 +1450,10 @@ def delete(self, name=None, arn=None): self.changed = True return True - if 'DeleteProtection' in self._resource_updates: + if "DeleteProtection" in self._resource_updates: self._update_firewall_delete_protection( - DeleteProtection=self._resource_updates['DeleteProtection'], **id_params, + DeleteProtection=self._resource_updates["DeleteProtection"], + **id_params, ) result = self._delete_firewall(**id_params) @@ -1445,55 +1464,54 @@ def delete(self, name=None, arn=None): def list(self, vpc_ids=None): params = dict() if vpc_ids: - params['VpcIds'] = vpc_ids + params["VpcIds"] = vpc_ids firewalls = self._list_firewalls(**params) if not firewalls: return list() - return [f.get('FirewallArn', None) for f in firewalls] + return [f.get("FirewallArn", None) for f in firewalls] def _normalize_firewall(self, firewall): if firewall is None: return None - subnets = [s.get('SubnetId') for s in firewall.get('SubnetMappings', [])] + subnets = [s.get("SubnetId") for s in firewall.get("SubnetMappings", [])] firewall = self._normalize_boto3_resource(firewall, add_tags=True) - firewall['subnets'] = subnets + firewall["subnets"] = subnets return firewall def _normalize_sync_state_config(self, policy): return self._normalize_boto3_resource(policy) def _normalize_sync_state(self, state): - config = {k: self._normalize_sync_state_config(v) for k, v in state.pop('Config', {}).items()} + config = {k: self._normalize_sync_state_config(v) for k, v in state.pop("Config", {}).items()} state = self._normalize_boto3_resource(state) - state['config'] = config or {} + state["config"] = config or {} return state def _normalize_firewall_metadata(self, firewall_metadata): if firewall_metadata is None: return None - states = {k: self._normalize_sync_state(v) for k, v in firewall_metadata.pop('SyncStates', {}).items()} + states = {k: self._normalize_sync_state(v) for k, v in firewall_metadata.pop("SyncStates", {}).items()} metadata = self._normalize_boto3_resource(firewall_metadata, add_tags=False) - metadata['sync_states'] = states or {} + metadata["sync_states"] = states or {} return metadata def _normalize_firewall_result(self, result): if result is None: return None - firewall = self._normalize_firewall(result.get('Firewall', None)) - firewall_metadata = self._normalize_firewall_metadata(result.get('FirewallMetadata', None)) + firewall = self._normalize_firewall(result.get("Firewall", None)) + firewall_metadata = self._normalize_firewall_metadata(result.get("FirewallMetadata", None)) result = camel_dict_to_snake_dict(result) if firewall: - result['firewall'] = firewall + result["firewall"] = firewall if firewall_metadata: - result['firewall_metadata'] = firewall_metadata + result["firewall_metadata"] = firewall_metadata return result def _normalize_resource(self, resource): return self._normalize_firewall_result(resource) def get_firewall(self, name=None, arn=None): - id_params = self._get_id_params(name=name, arn=arn) result = self._get_firewall(**id_params) @@ -1505,8 +1523,8 @@ def get_firewall(self, name=None, arn=None): @property def _subnets(self): - subnet_mappings = self._get_resource_value('SubnetMappings', []) - subnets = [s.get('SubnetId') for s in subnet_mappings] + subnet_mappings = self._get_resource_value("SubnetMappings", []) + subnets = [s.get("SubnetId") for s in subnet_mappings] return subnets def _subnets_to_vpc(self, subnets, subnet_details=None): @@ -1514,11 +1532,13 @@ def _subnets_to_vpc(self, subnets, subnet_details=None): return None if not subnet_details: subnet_details = self.ec2_manager._describe_subnets(SubnetIds=list(subnets)) - vpcs = [s.get('VpcId') for s in subnet_details] + vpcs = [s.get("VpcId") for s in subnet_details] if len(set(vpcs)) > 1: self.module.fail_json( - msg='Firewall subnets may only be in one VPC, multiple VPCs found', - vpcs=list(set(vpcs)), subnets=subnet_details) + msg="Firewall subnets may only be in one VPC, multiple VPCs found", + vpcs=list(set(vpcs)), + subnets=subnet_details, + ) return vpcs[0] def _format_subnet_mapping(self, subnets): @@ -1534,7 +1554,7 @@ def _policy_name_cache(self): if not results: return dict() - policy_cache = {p.get('Name', None): p.get('Arn', None) for p in results} + policy_cache = {p.get("Name", None): p.get("Arn", None) for p in results} self._policy_list_cache = policy_cache return policy_cache @@ -1544,20 +1564,26 @@ def _canonicalize_policy(self, name): """ arn = None # : is only valid in ARNs - if ':' in name: + if ":" in name: arn = name else: arn = self._policy_name_cache.get(name, None) if not arn: - self.module.fail_json('Unable to fetch ARN for policy', name=name, - policy_name_cache=self._policy_name_cache) + self.module.fail_json( + "Unable to fetch ARN for policy", name=name, policy_name_cache=self._policy_name_cache + ) arn_info = parse_aws_arn(arn) if not arn_info: - self.module.fail_json('Unable to parse ARN for policy', arn=arn, arn_info=arn_info) - arn_type = arn_info['resource'].split('/')[0] - if arn_type != 'firewall-policy': - self.module.fail_json('Policy ARN not of expected resource type', name=name, - arn=arn, expected_type='firewall-policy', found_type=arn_type) + self.module.fail_json("Unable to parse ARN for policy", arn=arn, arn_info=arn_info) + arn_type = arn_info["resource"].split("/")[0] + if arn_type != "firewall-policy": + self.module.fail_json( + "Policy ARN not of expected resource type", + name=name, + arn=arn, + expected_type="firewall-policy", + found_type=arn_type, + ) return arn @@ -1568,15 +1594,15 @@ def set_policy(self, policy): # Because the canonicalization of a non-ARN policy name will require an API call, # try comparing the current name to the policy name we've been passed. # If they match we don't need to perform the lookup. - current_policy = self._get_resource_value('FirewallPolicyArn', None) + current_policy = self._get_resource_value("FirewallPolicyArn", None) if current_policy: arn_info = parse_aws_arn(current_policy) - current_name = arn_info['resource'].split('/')[-1] + current_name = arn_info["resource"].split("/")[-1] if current_name == policy: return False policy = self._canonicalize_policy(policy) - return self._set_resource_value('FirewallPolicyArn', policy) + return self._set_resource_value("FirewallPolicyArn", policy) def set_subnets(self, subnets, purge=True): if subnets is None: @@ -1592,31 +1618,31 @@ def set_subnets(self, subnets, purge=True): subnet_details = self.ec2_manager._describe_subnets(SubnetIds=list(desired_subnets)) vpc = self._subnets_to_vpc(desired_subnets, subnet_details) - self._set_resource_value('VpcId', vpc, description='firewall VPC', immutable=True) + self._set_resource_value("VpcId", vpc, description="firewall VPC", immutable=True) - azs = [s.get('AvailabilityZoneId') for s in subnet_details] + azs = [s.get("AvailabilityZoneId") for s in subnet_details] if len(azs) != len(set(azs)): self.module.fail_json( - msg='Only one subnet per availability zone may set.', - availability_zones=azs, subnets=subnet_details) + msg="Only one subnet per availability zone may set.", availability_zones=azs, subnets=subnet_details + ) subnets_to_add = list(desired_subnets.difference(current_subnets)) subnets_to_remove = list(current_subnets.difference(desired_subnets)) self._subnet_updates = dict(add=subnets_to_add, remove=subnets_to_remove) - self._set_resource_value('SubnetMappings', self._format_subnet_mapping(desired_subnets)) + self._set_resource_value("SubnetMappings", self._format_subnet_mapping(desired_subnets)) return True def set_policy_change_protection(self, protection): - return self._set_resource_value('FirewallPolicyChangeProtection', protection) + return self._set_resource_value("FirewallPolicyChangeProtection", protection) def set_subnet_change_protection(self, protection): - return self._set_resource_value('SubnetChangeProtection', protection) + return self._set_resource_value("SubnetChangeProtection", protection) def set_delete_protection(self, protection): - return self._set_resource_value('DeleteProtection', protection) + return self._set_resource_value("DeleteProtection", protection) def set_description(self, description): - return self._set_resource_value('Description', description) + return self._set_resource_value("Description", description) def _do_create_resource(self): metadata, resource = self._merge_changes(filter_metadata=False) @@ -1629,10 +1655,7 @@ def _do_create_resource(self): def _generate_updated_resource(self): metadata, resource = self._merge_changes(filter_metadata=False) resource.update(self._get_id_params()) - updated_resource = dict( - Firewall=resource, - FirewallMetadata=metadata - ) + updated_resource = dict(Firewall=resource, FirewallMetadata=metadata) return updated_resource def _flush_create(self): @@ -1654,59 +1677,60 @@ def _do_update_resource(self): # There's no tool for 'bulk' updates, we need to iterate through these # one at a time... - if 'Description' in resource_updates: + if "Description" in resource_updates: self._update_firewall_description( - Description=resource_updates['Description'], **id_params, + Description=resource_updates["Description"], + **id_params, ) - if 'DeleteProtection' in resource_updates: + if "DeleteProtection" in resource_updates: self._update_firewall_delete_protection( - DeleteProtection=resource_updates['DeleteProtection'], **id_params, + DeleteProtection=resource_updates["DeleteProtection"], + **id_params, ) # Disable Change Protection... # When disabling change protection, do so *before* making changes - if 'FirewallPolicyChangeProtection' in resource_updates: - if not self._get_resource_value('FirewallPolicyChangeProtection'): + if "FirewallPolicyChangeProtection" in resource_updates: + if not self._get_resource_value("FirewallPolicyChangeProtection"): self._update_firewall_policy_change_protection( - FirewallPolicyChangeProtection=resource_updates['FirewallPolicyChangeProtection'], **id_params, + FirewallPolicyChangeProtection=resource_updates["FirewallPolicyChangeProtection"], + **id_params, ) - if 'SubnetChangeProtection' in resource_updates: - if not self._get_resource_value('SubnetChangeProtection'): + if "SubnetChangeProtection" in resource_updates: + if not self._get_resource_value("SubnetChangeProtection"): self._update_subnet_change_protection( - SubnetChangeProtection=resource_updates['SubnetChangeProtection'], **id_params, + SubnetChangeProtection=resource_updates["SubnetChangeProtection"], + **id_params, ) # General Changes - if 'SubnetMappings' in resource_updates: + if "SubnetMappings" in resource_updates: self._slow_start_change = True - subnets_to_add = self._subnet_updates.get('add', None) - subnets_to_remove = self._subnet_updates.get('remove', None) + subnets_to_add = self._subnet_updates.get("add", None) + subnets_to_remove = self._subnet_updates.get("remove", None) if subnets_to_remove: - self._disassociate_subnets( - SubnetIds=subnets_to_remove, **id_params) + self._disassociate_subnets(SubnetIds=subnets_to_remove, **id_params) if subnets_to_add: subnets_to_add = self._format_subnet_mapping(subnets_to_add) - self._associate_subnets( - SubnetMappings=subnets_to_add, **id_params) + self._associate_subnets(SubnetMappings=subnets_to_add, **id_params) - if 'FirewallPolicyArn' in resource_updates: + if "FirewallPolicyArn" in resource_updates: self._slow_start_change = True - self._associate_firewall_policy( - FirewallPolicyArn=resource_updates['FirewallPolicyArn'], - **id_params - ) + self._associate_firewall_policy(FirewallPolicyArn=resource_updates["FirewallPolicyArn"], **id_params) # Enable Change Protection. # When enabling change protection, do so *after* making changes - if 'FirewallPolicyChangeProtection' in resource_updates: - if self._get_resource_value('FirewallPolicyChangeProtection'): + if "FirewallPolicyChangeProtection" in resource_updates: + if self._get_resource_value("FirewallPolicyChangeProtection"): self._update_firewall_policy_change_protection( - FirewallPolicyChangeProtection=resource_updates['FirewallPolicyChangeProtection'], **id_params, + FirewallPolicyChangeProtection=resource_updates["FirewallPolicyChangeProtection"], + **id_params, ) - if 'SubnetChangeProtection' in resource_updates: - if self._get_resource_value('SubnetChangeProtection'): + if "SubnetChangeProtection" in resource_updates: + if self._get_resource_value("SubnetChangeProtection"): self._update_subnet_change_protection( - SubnetChangeProtection=resource_updates['SubnetChangeProtection'], **id_params, + SubnetChangeProtection=resource_updates["SubnetChangeProtection"], + **id_params, ) return True @@ -1723,8 +1747,8 @@ def _get_firewall(self, **params): if not result: return None - firewall = result.get('Firewall', None) - metadata = result.get('FirewallMetadata', None) + firewall = result.get("Firewall", None) + metadata = result.get("FirewallMetadata", None) self._preupdate_resource = deepcopy(firewall) self._preupdate_metadata = deepcopy(metadata) return dict(Firewall=firewall, FirewallMetadata=metadata) @@ -1755,7 +1779,7 @@ def _do_update_wait(self, **params): # Unlike RuleGroups and Policies for some reason Firewalls have the tags set # directly on the resource. def _set_tag_values(self, desired_tags): - return self._set_resource_value('Tags', ansible_dict_to_boto3_tag_list(desired_tags)) + return self._set_resource_value("Tags", ansible_dict_to_boto3_tag_list(desired_tags)) def _get_tag_values(self): - return self._get_resource_value('Tags', []) + return self._get_resource_value("Tags", []) diff --git a/plugins/module_utils/opensearch.py b/plugins/module_utils/opensearch.py index b5e0bbfeb8c..b461669e3e2 100644 --- a/plugins/module_utils/opensearch.py +++ b/plugins/module_utils/opensearch.py @@ -30,7 +30,10 @@ def get_domain_status(client, module, domain_name): response = client.describe_domain(DomainName=domain_name) except is_boto3_error_code("ResourceNotFoundException"): return None - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get domain {0}".format(domain_name)) return response["DomainStatus"] @@ -49,7 +52,10 @@ def get_domain_config(client, module, domain_name): response = client.describe_domain_config(DomainName=domain_name) except is_boto3_error_code("ResourceNotFoundException"): return (None, None) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get domain {0}".format(domain_name)) domain_config = {} arn = None @@ -88,13 +94,9 @@ def normalize_opensearch(client, module, domain): convert the attributes from camel case to snake case, and return the object. """ try: - domain["Tags"] = boto3_tag_list_to_ansible_dict( - client.list_tags(ARN=domain["ARN"], aws_retry=True)["TagList"] - ) + domain["Tags"] = boto3_tag_list_to_ansible_dict(client.list_tags(ARN=domain["ARN"], aws_retry=True)["TagList"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, "Couldn't get tags for domain %s" % domain["domain_name"] - ) + module.fail_json_aws(e, "Couldn't get tags for domain %s" % domain["domain_name"]) except KeyError: module.fail_json(msg=str(domain)) @@ -128,16 +130,14 @@ def wait_for_domain_status(client, module, domain_name, waiter_name): return time.sleep(15) # Timeout occured. - module.fail_json( - msg=f"Timeout waiting for wait state '{waiter_name}'. {status_msg}" - ) + module.fail_json(msg=f"Timeout waiting for wait state '{waiter_name}'. {status_msg}") def parse_version(engine_version): - ''' + """ Parse the engine version, which should be Elasticsearch_X.Y or OpenSearch_X.Y Return dict { 'engine_type': engine_type, 'major': major, 'minor': minor } - ''' + """ version = engine_version.split("_") if len(version) != 2: return None @@ -145,19 +145,19 @@ def parse_version(engine_version): if len(semver) != 2: return None engine_type = version[0] - if engine_type not in ['Elasticsearch', 'OpenSearch']: + if engine_type not in ["Elasticsearch", "OpenSearch"]: return None if not (semver[0].isdigit() and semver[1].isdigit()): return None major = int(semver[0]) minor = int(semver[1]) - return {'engine_type': engine_type, 'major': major, 'minor': minor} + return {"engine_type": engine_type, "major": major, "minor": minor} def compare_domain_versions(version1, version2): supported_engines = { - 'Elasticsearch': 1, - 'OpenSearch': 2, + "Elasticsearch": 1, + "OpenSearch": 2, } if isinstance(version1, string_types): version1 = parse_version(version1) @@ -169,21 +169,21 @@ def compare_domain_versions(version1, version2): return 1 elif version1 is None and version2 is None: return 0 - e1 = supported_engines.get(version1.get('engine_type')) - e2 = supported_engines.get(version2.get('engine_type')) + e1 = supported_engines.get(version1.get("engine_type")) + e2 = supported_engines.get(version2.get("engine_type")) if e1 < e2: return -1 elif e1 > e2: return 1 else: - if version1.get('major') < version2.get('major'): + if version1.get("major") < version2.get("major"): return -1 - elif version1.get('major') > version2.get('major'): + elif version1.get("major") > version2.get("major"): return 1 else: - if version1.get('minor') < version2.get('minor'): + if version1.get("minor") < version2.get("minor"): return -1 - elif version1.get('minor') > version2.get('minor'): + elif version1.get("minor") > version2.get("minor"): return 1 else: return 0 @@ -203,22 +203,15 @@ def get_target_increment_version(client, module, domain_name, target_version): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws( e, - msg="Couldn't get compatible versions for domain {0}".format( - domain_name), + msg="Couldn't get compatible versions for domain {0}".format(domain_name), ) - compat = api_compatible_versions.get('CompatibleVersions') + compat = api_compatible_versions.get("CompatibleVersions") if compat is None: - module.fail_json( - "Unable to determine list of compatible versions", - compatible_versions=api_compatible_versions) + module.fail_json("Unable to determine list of compatible versions", compatible_versions=api_compatible_versions) if len(compat) == 0: - module.fail_json( - "Unable to determine list of compatible versions", - compatible_versions=api_compatible_versions) + module.fail_json("Unable to determine list of compatible versions", compatible_versions=api_compatible_versions) if compat[0].get("TargetVersions") is None: - module.fail_json( - "No compatible versions found", - compatible_versions=api_compatible_versions) + module.fail_json("No compatible versions found", compatible_versions=api_compatible_versions) compatible_versions = [] for v in compat[0].get("TargetVersions"): if target_version == v: @@ -243,9 +236,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): changed = bool(tags_to_add or tags_to_remove) if tags_to_add: if module.check_mode: - module.exit_json( - changed=True, msg="Would have added tags to domain if not in check mode" - ) + module.exit_json(changed=True, msg="Would have added tags to domain if not in check mode") try: client.add_tags( ARN=resource_arn, @@ -255,21 +246,15 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws( - e, "Couldn't add tags to domain {0}".format(resource_arn) - ) + module.fail_json_aws(e, "Couldn't add tags to domain {0}".format(resource_arn)) if tags_to_remove: if module.check_mode: - module.exit_json( - changed=True, msg="Would have removed tags if not in check mode" - ) + module.exit_json(changed=True, msg="Would have removed tags if not in check mode") try: client.remove_tags(ARN=resource_arn, TagKeys=tags_to_remove) except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws( - e, "Couldn't remove tags from domain {0}".format(resource_arn) - ) + module.fail_json_aws(e, "Couldn't remove tags from domain {0}".format(resource_arn)) return changed diff --git a/plugins/module_utils/sns.py b/plugins/module_utils/sns.py index baa816b1a3e..8088b1b9ece 100644 --- a/plugins/module_utils/sns.py +++ b/plugins/module_utils/sns.py @@ -22,33 +22,35 @@ @AWSRetry.jittered_backoff() def _list_topics_with_backoff(client): - paginator = client.get_paginator('list_topics') - return paginator.paginate().build_full_result()['Topics'] + paginator = client.get_paginator("list_topics") + return paginator.paginate().build_full_result()["Topics"] -@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["NotFound"]) def _list_topic_subscriptions_with_backoff(client, topic_arn): - paginator = client.get_paginator('list_subscriptions_by_topic') - return paginator.paginate(TopicArn=topic_arn).build_full_result()['Subscriptions'] + paginator = client.get_paginator("list_subscriptions_by_topic") + return paginator.paginate(TopicArn=topic_arn).build_full_result()["Subscriptions"] -@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["NotFound"]) def _list_subscriptions_with_backoff(client): - paginator = client.get_paginator('list_subscriptions') - return paginator.paginate().build_full_result()['Subscriptions'] + paginator = client.get_paginator("list_subscriptions") + return paginator.paginate().build_full_result()["Subscriptions"] def list_topic_subscriptions(client, module, topic_arn): try: return _list_topic_subscriptions_with_backoff(client, topic_arn) - except is_boto3_error_code('AuthorizationError'): + except is_boto3_error_code("AuthorizationError"): try: # potentially AuthorizationError when listing subscriptions for third party topic - return [sub for sub in _list_subscriptions_with_backoff(client) - if sub['TopicArn'] == topic_arn] + return [sub for sub in _list_subscriptions_with_backoff(client) if sub["TopicArn"] == topic_arn] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % topic_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % topic_arn) @@ -57,13 +59,13 @@ def list_topics(client, module): topics = _list_topics_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get topic list") - return [t['TopicArn'] for t in topics] + return [t["TopicArn"] for t in topics] def topic_arn_lookup(client, module, name): # topic names cannot have colons, so this captures the full topic name all_topics = list_topics(client, module) - lookup_topic = ':%s' % name + lookup_topic = ":%s" % name for topic in all_topics: if topic.endswith(lookup_topic): return topic @@ -74,13 +76,13 @@ def compare_delivery_policies(policy_a, policy_b): _policy_b = copy.deepcopy(policy_b) # AWS automatically injects disableSubscriptionOverrides if you set an # http policy - if 'http' in policy_a: - if 'disableSubscriptionOverrides' not in policy_a['http']: - _policy_a['http']['disableSubscriptionOverrides'] = False - if 'http' in policy_b: - if 'disableSubscriptionOverrides' not in policy_b['http']: - _policy_b['http']['disableSubscriptionOverrides'] = False - comparison = (_policy_a != _policy_b) + if "http" in policy_a: + if "disableSubscriptionOverrides" not in policy_a["http"]: + _policy_a["http"]["disableSubscriptionOverrides"] = False + if "http" in policy_b: + if "disableSubscriptionOverrides" not in policy_b["http"]: + _policy_b["http"]["disableSubscriptionOverrides"] = False + comparison = _policy_a != _policy_b return comparison @@ -88,15 +90,15 @@ def canonicalize_endpoint(protocol, endpoint): # AWS SNS expects phone numbers in # and canonicalizes to E.164 format # See - if protocol == 'sms': - return re.sub('[^0-9+]*', '', endpoint) + if protocol == "sms": + return re.sub("[^0-9+]*", "", endpoint) return endpoint def get_tags(client, module, topic_arn): try: - return boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceArn=topic_arn)['Tags']) - except is_boto3_error_code('AuthorizationError'): + return boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceArn=topic_arn)["Tags"]) + except is_boto3_error_code("AuthorizationError"): module.warn("Permission denied accessing tags") return {} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -104,52 +106,53 @@ def get_tags(client, module, topic_arn): def get_info(connection, module, topic_arn): - name = module.params.get('name') - topic_type = module.params.get('topic_type') - state = module.params.get('state') - subscriptions = module.params.get('subscriptions') - purge_subscriptions = module.params.get('purge_subscriptions') - content_based_deduplication = module.params.get('content_based_deduplication') - subscriptions_existing = module.params.get('subscriptions_existing', []) - subscriptions_deleted = module.params.get('subscriptions_deleted', []) - subscriptions_added = module.params.get('subscriptions_added', []) - subscriptions_added = module.params.get('subscriptions_added', []) - topic_created = module.params.get('topic_created', False) - topic_deleted = module.params.get('topic_deleted', False) - attributes_set = module.params.get('attributes_set', []) + name = module.params.get("name") + topic_type = module.params.get("topic_type") + state = module.params.get("state") + subscriptions = module.params.get("subscriptions") + purge_subscriptions = module.params.get("purge_subscriptions") + content_based_deduplication = module.params.get("content_based_deduplication") + subscriptions_existing = module.params.get("subscriptions_existing", []) + subscriptions_deleted = module.params.get("subscriptions_deleted", []) + subscriptions_added = module.params.get("subscriptions_added", []) + subscriptions_added = module.params.get("subscriptions_added", []) + topic_created = module.params.get("topic_created", False) + topic_deleted = module.params.get("topic_deleted", False) + attributes_set = module.params.get("attributes_set", []) check_mode = module.check_mode info = { - 'name': name, - 'topic_type': topic_type, - 'state': state, - 'subscriptions_new': subscriptions, - 'subscriptions_existing': subscriptions_existing, - 'subscriptions_deleted': subscriptions_deleted, - 'subscriptions_added': subscriptions_added, - 'subscriptions_purge': purge_subscriptions, - 'content_based_deduplication': content_based_deduplication, - 'check_mode': check_mode, - 'topic_created': topic_created, - 'topic_deleted': topic_deleted, - 'attributes_set': attributes_set, + "name": name, + "topic_type": topic_type, + "state": state, + "subscriptions_new": subscriptions, + "subscriptions_existing": subscriptions_existing, + "subscriptions_deleted": subscriptions_deleted, + "subscriptions_added": subscriptions_added, + "subscriptions_purge": purge_subscriptions, + "content_based_deduplication": content_based_deduplication, + "check_mode": check_mode, + "topic_created": topic_created, + "topic_deleted": topic_deleted, + "attributes_set": attributes_set, } - if state != 'absent': + if state != "absent": if topic_arn in list_topics(connection, module): - info.update(camel_dict_to_snake_dict(connection.get_topic_attributes(TopicArn=topic_arn)['Attributes'])) - info['delivery_policy'] = info.pop('effective_delivery_policy') - info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in list_topic_subscriptions(connection, module, topic_arn)] + info.update(camel_dict_to_snake_dict(connection.get_topic_attributes(TopicArn=topic_arn)["Attributes"])) + info["delivery_policy"] = info.pop("effective_delivery_policy") + info["subscriptions"] = [ + camel_dict_to_snake_dict(sub) for sub in list_topic_subscriptions(connection, module, topic_arn) + ] info["tags"] = get_tags(connection, module, topic_arn) return info def update_tags(client, module, topic_arn): - - if module.params.get('tags') is None: + if module.params.get("tags") is None: return False existing_tags = get_tags(client, module, topic_arn) - to_update, to_delete = compare_aws_tags(existing_tags, module.params['tags'], module.params['purge_tags']) + to_update, to_delete = compare_aws_tags(existing_tags, module.params["tags"], module.params["purge_tags"]) if not bool(to_delete or to_update): return False @@ -159,8 +162,7 @@ def update_tags(client, module, topic_arn): if to_update: try: - client.tag_resource(ResourceArn=topic_arn, - Tags=ansible_dict_to_boto3_tag_list(to_update)) + client.tag_resource(ResourceArn=topic_arn, Tags=ansible_dict_to_boto3_tag_list(to_update)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add tags to topic") if to_delete: diff --git a/plugins/module_utils/transitgateway.py b/plugins/module_utils/transitgateway.py index fff2ce63ffe..5f0e934d1f2 100644 --- a/plugins/module_utils/transitgateway.py +++ b/plugins/module_utils/transitgateway.py @@ -21,21 +21,43 @@ def _waiter_model_data(self): # split the TGW waiters so we can keep them close to everything else. tgw_data = dict( tgw_attachment_available=dict( - operation='DescribeTransitGatewayAttachments', - delay=5, maxAttempts=120, + operation="DescribeTransitGatewayAttachments", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='success', matcher='pathAll', expected='available', argument='TransitGatewayAttachments[].State'), - ] + dict( + state="success", + matcher="pathAll", + expected="available", + argument="TransitGatewayAttachments[].State", + ), + ], ), tgw_attachment_deleted=dict( - operation='DescribeTransitGatewayAttachments', - delay=5, maxAttempts=120, + operation="DescribeTransitGatewayAttachments", + delay=5, + maxAttempts=120, acceptors=[ - dict(state='retry', matcher='pathAll', expected='deleting', argument='TransitGatewayAttachments[].State'), - dict(state='success', matcher='pathAll', expected='deleted', argument='TransitGatewayAttachments[].State'), - dict(state='success', matcher='path', expected=True, argument='length(TransitGatewayAttachments[]) == `0`'), - dict(state='success', matcher='error', expected='InvalidRouteTableID.NotFound'), - ] + dict( + state="retry", + matcher="pathAll", + expected="deleting", + argument="TransitGatewayAttachments[].State", + ), + dict( + state="success", + matcher="pathAll", + expected="deleted", + argument="TransitGatewayAttachments[].State", + ), + dict( + state="success", + matcher="path", + expected=True, + argument="length(TransitGatewayAttachments[]) == `0`", + ), + dict(state="success", matcher="error", expected="InvalidRouteTableID.NotFound"), + ], ), ) data.update(tgw_data) @@ -51,40 +73,40 @@ def __init__(self, module, **kwargs): # retry - retries the full fetch, but better than simply giving up. @AWSRetry.jittered_backoff() def _paginated_describe_transit_gateway_vpc_attachments(self, **params): - paginator = self.client.get_paginator('describe_transit_gateway_vpc_attachments') + paginator = self.client.get_paginator("describe_transit_gateway_vpc_attachments") return paginator.paginate(**params).build_full_result() - @Boto3Mixin.aws_error_handler('describe transit gateway attachments') + @Boto3Mixin.aws_error_handler("describe transit gateway attachments") def _describe_vpc_attachments(self, **params): result = self._paginated_describe_transit_gateway_vpc_attachments(**params) - return result.get('TransitGatewayVpcAttachments', None) + return result.get("TransitGatewayVpcAttachments", None) - @Boto3Mixin.aws_error_handler('create transit gateway attachment') + @Boto3Mixin.aws_error_handler("create transit gateway attachment") def _create_vpc_attachment(self, **params): result = self.client.create_transit_gateway_vpc_attachment(aws_retry=True, **params) - return result.get('TransitGatewayVpcAttachment', None) + return result.get("TransitGatewayVpcAttachment", None) - @Boto3Mixin.aws_error_handler('modify transit gateway attachment') + @Boto3Mixin.aws_error_handler("modify transit gateway attachment") def _modify_vpc_attachment(self, **params): result = self.client.modify_transit_gateway_vpc_attachment(aws_retry=True, **params) - return result.get('TransitGatewayVpcAttachment', None) + return result.get("TransitGatewayVpcAttachment", None) - @Boto3Mixin.aws_error_handler('delete transit gateway attachment') + @Boto3Mixin.aws_error_handler("delete transit gateway attachment") def _delete_vpc_attachment(self, **params): try: result = self.client.delete_transit_gateway_vpc_attachment(aws_retry=True, **params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - return result.get('TransitGatewayVpcAttachment', None) + return result.get("TransitGatewayVpcAttachment", None) - @Boto3Mixin.aws_error_handler('transit gateway attachment to finish deleting') + @Boto3Mixin.aws_error_handler("transit gateway attachment to finish deleting") def _wait_tgw_attachment_deleted(self, **params): - waiter = self.tgw_waiter_factory.get_waiter('tgw_attachment_deleted') + waiter = self.tgw_waiter_factory.get_waiter("tgw_attachment_deleted") waiter.wait(**params) - @Boto3Mixin.aws_error_handler('transit gateway attachment to become available') + @Boto3Mixin.aws_error_handler("transit gateway attachment to become available") def _wait_tgw_attachment_available(self, **params): - waiter = self.tgw_waiter_factory.get_waiter('tgw_attachment_available') + waiter = self.tgw_waiter_factory.get_waiter("tgw_attachment_available") waiter.wait(**params) def _normalize_tgw_attachment(self, rtb): @@ -103,11 +125,10 @@ def _get_tgw_vpc_attachment(self, **params): class BaseTGWManager(BaseEc2Manager): - - @Boto3Mixin.aws_error_handler('connect to AWS') - def _create_client(self, client_name='ec2'): - if client_name == 'ec2': - error_codes = ['IncorrectState'] + @Boto3Mixin.aws_error_handler("connect to AWS") + def _create_client(self, client_name="ec2"): + if client_name == "ec2": + error_codes = ["IncorrectState"] else: error_codes = [] @@ -119,8 +140,7 @@ def _create_client(self, client_name='ec2'): class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager): - - TAG_RESOURCE_TYPE = 'transit-gateway-attachment' + TAG_RESOURCE_TYPE = "transit-gateway-attachment" def __init__(self, module, id=None): self._subnet_updates = dict() @@ -131,7 +151,7 @@ def _get_id_params(self, id=None, id_list=False): id = self.resource_id if not id: # Users should never see this, but let's cover ourself - self.module.fail_json(msg='Attachment identifier parameter missing') + self.module.fail_json(msg="Attachment identifier parameter missing") if id_list: return dict(TransitGatewayAttachmentIds=[id]) @@ -140,18 +160,18 @@ def _get_id_params(self, id=None, id_list=False): def _extra_error_output(self): output = super(TransitGatewayVpcAttachmentManager, self)._extra_error_output() if self.resource_id: - output['TransitGatewayAttachmentId'] = self.resource_id + output["TransitGatewayAttachmentId"] = self.resource_id return output def _filter_immutable_resource_attributes(self, resource): resource = super(TransitGatewayVpcAttachmentManager, self)._filter_immutable_resource_attributes(resource) - resource.pop('TransitGatewayId', None) - resource.pop('VpcId', None) - resource.pop('VpcOwnerId', None) - resource.pop('State', None) - resource.pop('SubnetIds', None) - resource.pop('CreationTime', None) - resource.pop('Tags', None) + resource.pop("TransitGatewayId", None) + resource.pop("VpcId", None) + resource.pop("VpcOwnerId", None) + resource.pop("State", None) + resource.pop("SubnetIds", None) + resource.pop("CreationTime", None) + resource.pop("Tags", None) return resource def _set_option(self, name, value): @@ -159,36 +179,36 @@ def _set_option(self, name, value): return False # For now VPC Attachment options are all enable/disable if value: - value = 'enable' + value = "enable" else: - value = 'disable' + value = "disable" - options = deepcopy(self._preupdate_resource.get('Options', dict())) - options.update(self._resource_updates.get('Options', dict())) + options = deepcopy(self._preupdate_resource.get("Options", dict())) + options.update(self._resource_updates.get("Options", dict())) options[name] = value - return self._set_resource_value('Options', options) + return self._set_resource_value("Options", options) def set_dns_support(self, value): - return self._set_option('DnsSupport', value) + return self._set_option("DnsSupport", value) def set_ipv6_support(self, value): - return self._set_option('Ipv6Support', value) + return self._set_option("Ipv6Support", value) def set_appliance_mode_support(self, value): - return self._set_option('ApplianceModeSupport', value) + return self._set_option("ApplianceModeSupport", value) def set_transit_gateway(self, tgw_id): - return self._set_resource_value('TransitGatewayId', tgw_id) + return self._set_resource_value("TransitGatewayId", tgw_id) def set_vpc(self, vpc_id): - return self._set_resource_value('VpcId', vpc_id) + return self._set_resource_value("VpcId", vpc_id) def set_subnets(self, subnets=None, purge=True): if subnets is None: return False - current_subnets = set(self._preupdate_resource.get('SubnetIds', [])) + current_subnets = set(self._preupdate_resource.get("SubnetIds", [])) desired_subnets = set(subnets) if not purge: desired_subnets = desired_subnets.union(current_subnets) @@ -197,21 +217,23 @@ def set_subnets(self, subnets=None, purge=True): # information we 'know'. subnet_details = self._describe_subnets(SubnetIds=list(desired_subnets)) vpc_id = self.subnets_to_vpc(desired_subnets, subnet_details) - self._set_resource_value('VpcId', vpc_id, immutable=True) + self._set_resource_value("VpcId", vpc_id, immutable=True) # Only one subnet per-AZ is permitted - azs = [s.get('AvailabilityZoneId') for s in subnet_details] + azs = [s.get("AvailabilityZoneId") for s in subnet_details] if len(azs) != len(set(azs)): self.module.fail_json( - msg='Only one attachment subnet per availability zone may be set.', - availability_zones=azs, subnets=subnet_details) + msg="Only one attachment subnet per availability zone may be set.", + availability_zones=azs, + subnets=subnet_details, + ) subnets_to_add = list(desired_subnets.difference(current_subnets)) subnets_to_remove = list(current_subnets.difference(desired_subnets)) if not subnets_to_remove and not subnets_to_add: return False self._subnet_updates = dict(add=subnets_to_add, remove=subnets_to_remove) - self._set_resource_value('SubnetIds', list(desired_subnets)) + self._set_resource_value("SubnetIds", list(desired_subnets)) return True def subnets_to_vpc(self, subnets, subnet_details=None): @@ -221,11 +243,13 @@ def subnets_to_vpc(self, subnets, subnet_details=None): if subnet_details is None: subnet_details = self._describe_subnets(SubnetIds=list(subnets)) - vpcs = [s.get('VpcId') for s in subnet_details] + vpcs = [s.get("VpcId") for s in subnet_details] if len(set(vpcs)) > 1: self.module.fail_json( - msg='Attachment subnets may only be in one VPC, multiple VPCs found', - vpcs=list(set(vpcs)), subnets=subnet_details) + msg="Attachment subnets may only be in one VPC, multiple VPCs found", + vpcs=list(set(vpcs)), + subnets=subnet_details, + ) return vpcs[0] @@ -248,26 +272,25 @@ def _do_create_resource(self): params = self._merge_resource_changes(filter_immutable=False, creation=True) response = self._create_vpc_attachment(**params) if response: - self.resource_id = response.get('TransitGatewayAttachmentId', None) + self.resource_id = response.get("TransitGatewayAttachmentId", None) return response def _do_update_resource(self): - if self._preupdate_resource.get('State', None) == 'pending': + if self._preupdate_resource.get("State", None) == "pending": # Resources generally don't like it if you try to update before creation # is complete. If things are in a 'pending' state they'll often throw # exceptions. self._wait_for_creation() - elif self._preupdate_resource.get('State', None) == 'deleting': - self.module.fail_json(msg='Deletion in progress, unable to update', - route_tables=[self.original_resource]) + elif self._preupdate_resource.get("State", None) == "deleting": + self.module.fail_json(msg="Deletion in progress, unable to update", route_tables=[self.original_resource]) updates = self._filter_immutable_resource_attributes(self._resource_updates) - subnets_to_add = self._subnet_updates.get('add', []) - subnets_to_remove = self._subnet_updates.get('remove', []) + subnets_to_add = self._subnet_updates.get("add", []) + subnets_to_remove = self._subnet_updates.get("remove", []) if subnets_to_add: - updates['AddSubnetIds'] = subnets_to_add + updates["AddSubnetIds"] = subnets_to_add if subnets_to_remove: - updates['RemoveSubnetIds'] = subnets_to_remove + updates["RemoveSubnetIds"] = subnets_to_remove if not updates: return False @@ -283,7 +306,6 @@ def get_resource(self): return self.get_attachment() def delete(self, id=None): - if id: id_params = self._get_id_params(id=id, id_list=True) result = self._get_tgw_vpc_attachment(**id_params) @@ -295,7 +317,7 @@ def delete(self, id=None): if not result: return False - if result.get('State') == 'deleting': + if result.get("State") == "deleting": self._wait_for_deletion() return False @@ -315,9 +337,9 @@ def delete(self, id=None): def list(self, filters=None, id=None): params = dict() if id: - params['TransitGatewayAttachmentIds'] = [id] + params["TransitGatewayAttachmentIds"] = [id] if filters: - params['Filters'] = ansible_dict_to_boto3_filter_list(filters) + params["Filters"] = ansible_dict_to_boto3_filter_list(filters) attachments = self._describe_vpc_attachments(**params) if not attachments: return list() @@ -325,7 +347,6 @@ def list(self, filters=None, id=None): return [self._normalize_tgw_attachment(a) for a in attachments] def get_attachment(self, id=None): - # RouteTable needs a list, Association/Propagation needs a single ID id_params = self._get_id_params(id=id, id_list=True) id_param = self._get_id_params(id=id, id_list=False) diff --git a/plugins/module_utils/wafv2.py b/plugins/module_utils/wafv2.py index c1770978b9c..c0eb363efa4 100644 --- a/plugins/module_utils/wafv2.py +++ b/plugins/module_utils/wafv2.py @@ -19,7 +19,7 @@ def _list_tags(wafv2, arn, fail_json_aws, next_marker=None): params = dict(ResourceARN=arn) if next_marker: - params['NextMarker'] = next_marker + params["NextMarker"] = next_marker try: return wafv2.list_tags_for_resource(**params) except (BotoCoreError, ClientError) as e: @@ -32,9 +32,9 @@ def describe_wafv2_tags(wafv2, arn, fail_json_aws): # there is currently no paginator for wafv2 while True: responce = _list_tags(wafv2, arn, fail_json_aws) - next_marker = responce.get('NextMarker', None) - tag_info = responce.get('TagInfoForResource', {}) - tag_list.extend(tag_info.get('TagList', [])) + next_marker = responce.get("NextMarker", None) + tag_info = responce.get("TagInfoForResource", {}) + tag_list.extend(tag_info.get("TagList", [])) if not next_marker: break return boto3_tag_list_to_ansible_dict(tag_list) @@ -69,39 +69,37 @@ def ensure_wafv2_tags(wafv2, arn, tags, purge_tags, fail_json_aws, check_mode): def wafv2_list_web_acls(wafv2, scope, fail_json_aws, nextmarker=None): # there is currently no paginator for wafv2 - req_obj = { - 'Scope': scope, - 'Limit': 100 - } + req_obj = {"Scope": scope, "Limit": 100} if nextmarker: - req_obj['NextMarker'] = nextmarker + req_obj["NextMarker"] = nextmarker try: response = wafv2.list_web_acls(**req_obj) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to list wafv2 web acl") - if response.get('NextMarker'): - response['WebACLs'] += wafv2_list_web_acls(wafv2, scope, fail_json_aws, nextmarker=response.get('NextMarker')).get('WebACLs') + if response.get("NextMarker"): + response["WebACLs"] += wafv2_list_web_acls( + wafv2, scope, fail_json_aws, nextmarker=response.get("NextMarker") + ).get("WebACLs") return response def wafv2_list_rule_groups(wafv2, scope, fail_json_aws, nextmarker=None): # there is currently no paginator for wafv2 - req_obj = { - 'Scope': scope, - 'Limit': 100 - } + req_obj = {"Scope": scope, "Limit": 100} if nextmarker: - req_obj['NextMarker'] = nextmarker + req_obj["NextMarker"] = nextmarker try: response = wafv2.list_rule_groups(**req_obj) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to list wafv2 rule group") - if response.get('NextMarker'): - response['RuleGroups'] += wafv2_list_rule_groups(wafv2, scope, fail_json_aws, nextmarker=response.get('NextMarker')).get('RuleGroups') + if response.get("NextMarker"): + response["RuleGroups"] += wafv2_list_rule_groups( + wafv2, scope, fail_json_aws, nextmarker=response.get("NextMarker") + ).get("RuleGroups") return response @@ -112,20 +110,20 @@ def wafv2_snake_dict_to_camel_dict(a): retval = {} for item in a.keys(): if isinstance(a.get(item), dict): - if 'Ip' in item: - retval[item.replace('Ip', 'IP')] = wafv2_snake_dict_to_camel_dict(a.get(item)) - elif 'Arn' == item: - retval['ARN'] = wafv2_snake_dict_to_camel_dict(a.get(item)) + if "Ip" in item: + retval[item.replace("Ip", "IP")] = wafv2_snake_dict_to_camel_dict(a.get(item)) + elif "Arn" == item: + retval["ARN"] = wafv2_snake_dict_to_camel_dict(a.get(item)) else: retval[item] = wafv2_snake_dict_to_camel_dict(a.get(item)) elif isinstance(a.get(item), list): retval[item] = [] for idx in range(len(a.get(item))): retval[item].append(wafv2_snake_dict_to_camel_dict(a.get(item)[idx])) - elif 'Ip' in item: - retval[item.replace('Ip', 'IP')] = a.get(item) - elif 'Arn' == item: - retval['ARN'] = a.get(item) + elif "Ip" in item: + retval[item.replace("Ip", "IP")] = a.get(item) + elif "Arn" == item: + retval["ARN"] = a.get(item) else: retval[item] = a.get(item) return retval @@ -138,24 +136,31 @@ def nested_byte_values_to_strings(rule, keyname): - AndStatement - NotStatement """ - if rule.get('Statement', {}).get(keyname): - for idx in range(len(rule.get('Statement', {}).get(keyname, {}).get('Statements'))): - if rule['Statement'][keyname]['Statements'][idx].get('ByteMatchStatement'): - rule['Statement'][keyname]['Statements'][idx]['ByteMatchStatement']['SearchString'] = \ - rule.get('Statement').get(keyname).get('Statements')[idx].get('ByteMatchStatement').get('SearchString').decode('utf-8') + if rule.get("Statement", {}).get(keyname): + for idx in range(len(rule.get("Statement", {}).get(keyname, {}).get("Statements"))): + if rule["Statement"][keyname]["Statements"][idx].get("ByteMatchStatement"): + rule["Statement"][keyname]["Statements"][idx]["ByteMatchStatement"]["SearchString"] = ( + rule.get("Statement") + .get(keyname) + .get("Statements")[idx] + .get("ByteMatchStatement") + .get("SearchString") + .decode("utf-8") + ) return rule def byte_values_to_strings_before_compare(rules): for idx in range(len(rules)): - if rules[idx].get('Statement', {}).get('ByteMatchStatement', {}).get('SearchString'): - rules[idx]['Statement']['ByteMatchStatement']['SearchString'] = \ - rules[idx].get('Statement').get('ByteMatchStatement').get('SearchString').decode('utf-8') + if rules[idx].get("Statement", {}).get("ByteMatchStatement", {}).get("SearchString"): + rules[idx]["Statement"]["ByteMatchStatement"]["SearchString"] = ( + rules[idx].get("Statement").get("ByteMatchStatement").get("SearchString").decode("utf-8") + ) else: - for statement in ['AndStatement', 'OrStatement', 'NotStatement']: - if rules[idx].get('Statement', {}).get(statement): + for statement in ["AndStatement", "OrStatement", "NotStatement"]: + if rules[idx].get("Statement", {}).get(statement): rules[idx] = nested_byte_values_to_strings(rules[idx], statement) return rules @@ -163,11 +168,11 @@ def byte_values_to_strings_before_compare(rules): def compare_priority_rules(existing_rules, requested_rules, purge_rules, state): diff = False - existing_rules = sorted(existing_rules, key=lambda k: k['Priority']) + existing_rules = sorted(existing_rules, key=lambda k: k["Priority"]) existing_rules = byte_values_to_strings_before_compare(existing_rules) - requested_rules = sorted(requested_rules, key=lambda k: k['Priority']) + requested_rules = sorted(requested_rules, key=lambda k: k["Priority"]) - if purge_rules and state == 'present': + if purge_rules and state == "present": merged_rules = requested_rules if len(existing_rules) == len(requested_rules): for idx in range(len(existing_rules)): @@ -185,8 +190,8 @@ def compare_priority_rules(existing_rules, requested_rules, purge_rules, state): ex_idx_pop = [] for existing_idx in range(len(existing_rules)): for requested_idx in range(len(requested_rules)): - if existing_rules[existing_idx].get('Priority') == requested_rules[requested_idx].get('Priority'): - if state == 'present': + if existing_rules[existing_idx].get("Priority") == requested_rules[requested_idx].get("Priority"): + if state == "present": ex_idx_pop.append(existing_idx) if existing_rules[existing_idx] != requested_rules[requested_idx]: diff = True @@ -198,7 +203,7 @@ def compare_priority_rules(existing_rules, requested_rules, purge_rules, state): for idx in ex_idx_pop: existing_rules.pop(idx) - if state == 'present': + if state == "present": merged_rules = existing_rules + requested_rules if len(merged_rules) != prev_count: diff --git a/plugins/modules/accessanalyzer_validate_policy_info.py b/plugins/modules/accessanalyzer_validate_policy_info.py index 817f414671b..fab777175e7 100644 --- a/plugins/modules/accessanalyzer_validate_policy_info.py +++ b/plugins/modules/accessanalyzer_validate_policy_info.py @@ -177,11 +177,10 @@ def filter_findings(findings, type_filter): return findings # Convert type_filter to the findingType strings returned by the API - filter_map = dict(error='ERROR', security='SECURITY_WARNING', - suggestion='SUGGESTION', warning='WARNING') + filter_map = dict(error="ERROR", security="SECURITY_WARNING", suggestion="SUGGESTION", warning="WARNING") allowed_types = [filter_map[t] for t in type_filter] - filtered_results = [f for f in findings if f.get('findingType', None) in allowed_types] + filtered_results = [f for f in findings if f.get("findingType", None) in allowed_types] return filtered_results @@ -190,47 +189,47 @@ def main(): # values are likely to be expanded, let's avoid hard coding limits which might not hold true in # the long term... argument_spec = dict( - policy=dict(required=True, type='json', aliases=['policy_document']), - locale=dict(required=False, type='str', default='EN'), - policy_type=dict(required=False, type='str', default='identity', - choices=['identity', 'resource', 'service_control']), - resource_type=dict(required=False, type='str'), - results_filter=dict(required=False, type='list', elements='str', - choices=['error', 'security', 'suggestion', 'warning']), + policy=dict(required=True, type="json", aliases=["policy_document"]), + locale=dict(required=False, type="str", default="EN"), + policy_type=dict( + required=False, type="str", default="identity", choices=["identity", "resource", "service_control"] + ), + resource_type=dict(required=False, type="str"), + results_filter=dict( + required=False, type="list", elements="str", choices=["error", "security", "suggestion", "warning"] + ), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - policy_type_map = dict(identity='IDENTITY_POLICY', resource='RESOURCE_POLICY', - service_control='SERVICE_CONTROL_POLICY') + policy_type_map = dict( + identity="IDENTITY_POLICY", resource="RESOURCE_POLICY", service_control="SERVICE_CONTROL_POLICY" + ) - policy = module.params.get('policy') - policy_type = policy_type_map[module.params.get('policy_type')] - locale = module.params.get('locale').upper() - resource_type = module.params.get('resource_type') - results_filter = module.params.get('results_filter') + policy = module.params.get("policy") + policy_type = policy_type_map[module.params.get("policy_type")] + locale = module.params.get("locale").upper() + resource_type = module.params.get("resource_type") + results_filter = module.params.get("results_filter") try: - client = module.client('accessanalyzer', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("accessanalyzer", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") params = dict(locale=locale, policyDocument=policy, policyType=policy_type) - if policy_type == 'RESOURCE_POLICY' and resource_type: - params['policyType'] = resource_type + if policy_type == "RESOURCE_POLICY" and resource_type: + params["policyType"] = resource_type results = client.validate_policy(aws_retry=True, **params) - findings = filter_findings(results.get('findings', []), results_filter) - results['findings'] = findings + findings = filter_findings(results.get("findings", []), results_filter) + results["findings"] = findings results = camel_dict_to_snake_dict(results) module.exit_json(changed=False, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/acm_certificate.py b/plugins/modules/acm_certificate.py index e7ea9c6d87a..197124fb59e 100644 --- a/plugins/modules/acm_certificate.py +++ b/plugins/modules/acm_certificate.py @@ -276,12 +276,10 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws( - e, "Couldn't add tags to certificate {0}".format(resource_arn) - ) + module.fail_json_aws(e, "Couldn't add tags to certificate {0}".format(resource_arn)) if tags_to_remove and not module.check_mode: # remove_tags_from_certificate wants a list of key, value pairs, not a list of keys. - tags_list = [{'Key': key, 'Value': existing_tags.get(key)} for key in tags_to_remove] + tags_list = [{"Key": key, "Value": existing_tags.get(key)} for key in tags_to_remove] try: client.remove_tags_from_certificate( CertificateArn=resource_arn, @@ -291,9 +289,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws( - e, "Couldn't remove tags from certificate {0}".format(resource_arn) - ) + module.fail_json_aws(e, "Couldn't remove tags from certificate {0}".format(resource_arn)) new_tags = deepcopy(existing_tags) for key, value in tags_to_add.items(): new_tags[key] = value @@ -308,7 +304,6 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): # May include some lines between each chain in the cert, e.g. "Subject: ..." # Returns True iff the chains/certs are functionally identical (including chain order) def chain_compare(module, a, b): - chain_a_pem = pem_chain_split(module, a) chain_b_pem = pem_chain_split(module, b) @@ -316,7 +311,7 @@ def chain_compare(module, a, b): return False # Chain length is the same - for (ca, cb) in zip(chain_a_pem, chain_b_pem): + for ca, cb in zip(chain_a_pem, chain_b_pem): der_a = PEM_body_to_DER(module, ca) der_b = PEM_body_to_DER(module, cb) if der_a != der_b: @@ -336,7 +331,9 @@ def PEM_body_to_DER(module, pem): # Store this globally to avoid repeated recompilation -pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?") +pem_chain_split_regex = re.compile( + r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?" +) # Use regex to split up a chain or single cert into an array of base64 encoded data @@ -344,7 +341,6 @@ def PEM_body_to_DER(module, pem): # Noting that some chains have non-pem data in between each cert # This function returns only what's between the headers, excluding the headers def pem_chain_split(module, pem): - pem_arr = re.findall(pem_chain_split_regex, to_text(pem)) if len(pem_arr) == 0: @@ -359,53 +355,55 @@ def update_imported_certificate(client, module, acm, old_cert, desired_tags): Update the existing certificate that was previously imported in ACM. """ module.debug("Existing certificate found in ACM") - if ('tags' not in old_cert) or ('Name' not in old_cert['tags']): + if ("tags" not in old_cert) or ("Name" not in old_cert["tags"]): # shouldn't happen module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert) - if module.params.get('name_tag') is not None and (old_cert['tags']['Name'] != module.params.get('name_tag')): + if module.params.get("name_tag") is not None and (old_cert["tags"]["Name"] != module.params.get("name_tag")): # This could happen if the user identified the certificate using 'certificate_arn' or 'domain_name', # and the 'Name' tag in the AWS API does not match the ansible 'name_tag'. module.fail_json(msg="Internal error, Name tag does not match", certificate=old_cert) - if 'certificate' not in old_cert: + if "certificate" not in old_cert: # shouldn't happen module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert) cert_arn = None # Are the existing certificate in ACM and the local certificate the same? same = True - if module.params.get('certificate') is not None: - same &= chain_compare(module, old_cert['certificate'], module.params['certificate']) - if module.params['certificate_chain']: + if module.params.get("certificate") is not None: + same &= chain_compare(module, old_cert["certificate"], module.params["certificate"]) + if module.params["certificate_chain"]: # Need to test this # not sure if Amazon appends the cert itself to the chain when self-signed - same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain']) + same &= chain_compare(module, old_cert["certificate_chain"], module.params["certificate_chain"]) else: # When there is no chain with a cert # it seems Amazon returns the cert itself as the chain - same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate']) + same &= chain_compare(module, old_cert["certificate_chain"], module.params["certificate"]) if same: module.debug("Existing certificate in ACM is the same") - cert_arn = old_cert['certificate_arn'] + cert_arn = old_cert["certificate_arn"] changed = False else: - absent_args = ['certificate', 'name_tag', 'private_key'] + absent_args = ["certificate", "name_tag", "private_key"] if sum([(module.params[a] is not None) for a in absent_args]) < 3: - module.fail_json(msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified") + module.fail_json( + msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified" + ) module.debug("Existing certificate in ACM is different, overwriting") changed = True if module.check_mode: - cert_arn = old_cert['certificate_arn'] + cert_arn = old_cert["certificate_arn"] # note: returned domain will be the domain of the previous cert else: # update cert in ACM cert_arn = acm.import_certificate( client, module, - certificate=module.params['certificate'], - private_key=module.params['private_key'], - certificate_chain=module.params['certificate_chain'], - arn=old_cert['certificate_arn'], + certificate=module.params["certificate"], + private_key=module.params["private_key"], + certificate_chain=module.params["certificate_chain"], + arn=old_cert["certificate_arn"], tags=desired_tags, ) return (changed, cert_arn) @@ -416,22 +414,24 @@ def import_certificate(client, module, acm, desired_tags): Import a certificate to ACM. """ # Validate argument requirements - absent_args = ['certificate', 'name_tag', 'private_key'] + absent_args = ["certificate", "name_tag", "private_key"] cert_arn = None if sum([(module.params[a] is not None) for a in absent_args]) < 3: - module.fail_json(msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified") + module.fail_json( + msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified" + ) module.debug("No certificate in ACM. Creating new one.") changed = True if module.check_mode: - domain = 'example.com' + domain = "example.com" module.exit_json(certificate=dict(domain_name=domain), changed=True) else: cert_arn = acm.import_certificate( client, module, - certificate=module.params['certificate'], - private_key=module.params['private_key'], - certificate_chain=module.params['certificate_chain'], + certificate=module.params["certificate"], + private_key=module.params["private_key"], + certificate_chain=module.params["certificate_chain"], tags=desired_tags, ) return (changed, cert_arn) @@ -441,7 +441,7 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags, cert_arn = None changed = False if len(certificates) > 1: - msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag'] + msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params["name_tag"] module.fail_json(msg=msg, certificates=certificates) elif len(certificates) == 1: # Update existing certificate that was previously imported to ACM. @@ -452,11 +452,13 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags, # Add/remove tags to/from certificate try: - existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags_for_certificate(CertificateArn=cert_arn)['Tags']) + existing_tags = boto3_tag_list_to_ansible_dict( + client.list_tags_for_certificate(CertificateArn=cert_arn)["Tags"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get tags for certificate") - purge_tags = module.params.get('purge_tags') + purge_tags = module.params.get("purge_tags") (c, new_tags) = ensure_tags(client, module, cert_arn, existing_tags, desired_tags, purge_tags) changed |= c domain = acm.get_domain_of_cert(client=client, module=module, arn=cert_arn) @@ -466,21 +468,21 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags, def ensure_certificates_absent(client, module, acm, certificates): for cert in certificates: if not module.check_mode: - acm.delete_certificate(client, module, cert['certificate_arn']) - module.exit_json(arns=[cert['certificate_arn'] for cert in certificates], changed=(len(certificates) > 0)) + acm.delete_certificate(client, module, cert["certificate_arn"]) + module.exit_json(arns=[cert["certificate_arn"] for cert in certificates], changed=(len(certificates) > 0)) def main(): argument_spec = dict( certificate=dict(), - certificate_arn=dict(aliases=['arn']), + certificate_arn=dict(aliases=["arn"]), certificate_chain=dict(), - domain_name=dict(aliases=['domain']), - name_tag=dict(aliases=['name']), + domain_name=dict(aliases=["domain"]), + name_tag=dict(aliases=["name"]), private_key=dict(no_log=True), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, @@ -489,62 +491,66 @@ def main(): acm = ACMServiceManager(module) # Check argument requirements - if module.params['state'] == 'present': + if module.params["state"] == "present": # at least one of these should be specified. - absent_args = ['certificate_arn', 'domain_name', 'name_tag'] + absent_args = ["certificate_arn", "domain_name", "name_tag"] if sum([(module.params[a] is not None) for a in absent_args]) < 1: for a in absent_args: module.debug("%s is %s" % (a, module.params[a])) - module.fail_json(msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified") + module.fail_json( + msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" + ) else: # absent # exactly one of these should be specified - absent_args = ['certificate_arn', 'domain_name', 'name_tag'] + absent_args = ["certificate_arn", "domain_name", "name_tag"] if sum([(module.params[a] is not None) for a in absent_args]) != 1: for a in absent_args: module.debug("%s is %s" % (a, module.params[a])) - module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified") + module.fail_json( + msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" + ) filter_tags = None desired_tags = None - if module.params.get('tags') is not None: - desired_tags = module.params['tags'] + if module.params.get("tags") is not None: + desired_tags = module.params["tags"] else: # Because we're setting the Name tag, we need to explicitly not purge when tags isn't passed - module.params['purge_tags'] = False - if module.params.get('name_tag') is not None: + module.params["purge_tags"] = False + if module.params.get("name_tag") is not None: # The module was originally implemented to filter certificates based on the 'Name' tag. # Other tags are not used to filter certificates. # It would make sense to replace the existing name_tag, domain, certificate_arn attributes # with a 'filter' attribute, but that would break backwards-compatibility. - filter_tags = dict(Name=module.params['name_tag']) + filter_tags = dict(Name=module.params["name_tag"]) if desired_tags is not None: - if 'Name' in desired_tags: - if desired_tags['Name'] != module.params['name_tag']: + if "Name" in desired_tags: + if desired_tags["Name"] != module.params["name_tag"]: module.fail_json(msg="Value of 'name_tag' conflicts with value of 'tags.Name'") else: - desired_tags['Name'] = module.params['name_tag'] + desired_tags["Name"] = module.params["name_tag"] else: desired_tags = deepcopy(filter_tags) - client = module.client('acm') + client = module.client("acm") # fetch the list of certificates currently in ACM certificates = acm.get_certificates( client=client, module=module, - domain_name=module.params['domain_name'], - arn=module.params['certificate_arn'], + domain_name=module.params["domain_name"], + arn=module.params["certificate_arn"], only_tags=filter_tags, ) module.debug("Found %d corresponding certificates in ACM" % len(certificates)) - if module.params['state'] == 'present': + if module.params["state"] == "present": ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags) else: # state == absent ensure_certificates_absent(client, module, acm, certificates) -if __name__ == '__main__': +if __name__ == "__main__": # tests() main() diff --git a/plugins/modules/acm_certificate_info.py b/plugins/modules/acm_certificate_info.py index 2364751f519..287e7006aef 100644 --- a/plugins/modules/acm_certificate_info.py +++ b/plugins/modules/acm_certificate_info.py @@ -264,31 +264,42 @@ def main(): argument_spec = dict( - certificate_arn=dict(aliases=['arn']), - domain_name=dict(aliases=['name']), + certificate_arn=dict(aliases=["arn"]), + domain_name=dict(aliases=["name"]), statuses=dict( - type='list', - elements='str', - choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED'] + type="list", + elements="str", + choices=[ + "PENDING_VALIDATION", + "ISSUED", + "INACTIVE", + "EXPIRED", + "VALIDATION_TIMED_OUT", + "REVOKED", + "FAILED", + ], ), - tags=dict(type='dict'), + tags=dict(type="dict"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) acm_info = ACMServiceManager(module) - client = module.client('acm') + client = module.client("acm") - certificates = acm_info.get_certificates(client, module, - domain_name=module.params['domain_name'], - statuses=module.params['statuses'], - arn=module.params['certificate_arn'], - only_tags=module.params['tags']) + certificates = acm_info.get_certificates( + client, + module, + domain_name=module.params["domain_name"], + statuses=module.params["statuses"], + arn=module.params["certificate_arn"], + only_tags=module.params["tags"], + ) - if module.params['certificate_arn'] and len(certificates) != 1: - module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params['certificate_arn']) + if module.params["certificate_arn"] and len(certificates) != 1: + module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params["certificate_arn"]) module.exit_json(certificates=certificates) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/api_gateway.py b/plugins/modules/api_gateway.py index 615c3d89aa3..176404f644d 100644 --- a/plugins/modules/api_gateway.py +++ b/plugins/modules/api_gateway.py @@ -182,22 +182,22 @@ def main(): argument_spec = dict( - api_id=dict(type='str', required=False), - state=dict(type='str', default='present', choices=['present', 'absent']), - swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']), - swagger_dict=dict(type='json', default=None), - swagger_text=dict(type='str', default=None), - stage=dict(type='str', default=None), - deploy_desc=dict(type='str', default="Automatic deployment by Ansible."), - cache_enabled=dict(type='bool', default=False), - cache_size=dict(type='str', default='0.5', choices=['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']), - stage_variables=dict(type='dict', default={}), - stage_canary_settings=dict(type='dict', default={}), - tracing_enabled=dict(type='bool', default=False), - endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE']) + api_id=dict(type="str", required=False), + state=dict(type="str", default="present", choices=["present", "absent"]), + swagger_file=dict(type="path", default=None, aliases=["src", "api_file"]), + swagger_dict=dict(type="json", default=None), + swagger_text=dict(type="str", default=None), + stage=dict(type="str", default=None), + deploy_desc=dict(type="str", default="Automatic deployment by Ansible."), + cache_enabled=dict(type="bool", default=False), + cache_size=dict(type="str", default="0.5", choices=["0.5", "1.6", "6.1", "13.5", "28.4", "58.2", "118", "237"]), + stage_variables=dict(type="dict", default={}), + stage_canary_settings=dict(type="dict", default={}), + tracing_enabled=dict(type="bool", default=False), + endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]), ) - mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']] # noqa: F841 + mutually_exclusive = [["swagger_file", "swagger_dict", "swagger_text"]] # noqa: F841 module = AnsibleAWSModule( argument_spec=argument_spec, @@ -205,16 +205,16 @@ def main(): mutually_exclusive=mutually_exclusive, ) - api_id = module.params.get('api_id') - state = module.params.get('state') # noqa: F841 - swagger_file = module.params.get('swagger_file') - swagger_dict = module.params.get('swagger_dict') - swagger_text = module.params.get('swagger_text') - endpoint_type = module.params.get('endpoint_type') + api_id = module.params.get("api_id") + state = module.params.get("state") # noqa: F841 + swagger_file = module.params.get("swagger_file") + swagger_dict = module.params.get("swagger_dict") + swagger_text = module.params.get("swagger_text") + endpoint_type = module.params.get("endpoint_type") - client = module.client('apigateway') + client = module.client("apigateway") - changed = True # for now it will stay that way until we can sometimes avoid change + changed = True # for now it will stay that way until we can sometimes avoid change conf_res = None dep_res = None del_res = None @@ -222,8 +222,9 @@ def main(): if state == "present": if api_id is None: api_id = create_empty_api(module, client, endpoint_type) - api_data = get_api_definitions(module, swagger_file=swagger_file, - swagger_dict=swagger_dict, swagger_text=swagger_text) + api_data = get_api_definitions( + module, swagger_file=swagger_file, swagger_dict=swagger_dict, swagger_text=swagger_text + ) conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data) if state == "absent": del_res = delete_rest_api(module, client, api_id) @@ -231,11 +232,11 @@ def main(): exit_args = {"changed": changed, "api_id": api_id} if conf_res is not None: - exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res) + exit_args["configure_response"] = camel_dict_to_snake_dict(conf_res) if dep_res is not None: - exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res) + exit_args["deploy_response"] = camel_dict_to_snake_dict(dep_res) if del_res is not None: - exit_args['delete_response'] = camel_dict_to_snake_dict(del_res) + exit_args["delete_response"] = camel_dict_to_snake_dict(del_res) module.exit_json(**exit_args) @@ -255,7 +256,7 @@ def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_te apidata = swagger_text if apidata is None: - module.fail_json(msg='module error - no swagger info provided') + module.fail_json(msg="module error - no swagger info provided") return apidata @@ -302,7 +303,7 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): deploy_response = None - stage = module.params.get('stage') + stage = module.params.get("stage") if stage: try: deploy_response = create_deployment(client, api_id, **module.params) @@ -313,12 +314,14 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): return configure_response, deploy_response -retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ['TooManyRequestsException']} +retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ["TooManyRequestsException"]} @AWSRetry.jittered_backoff(**retry_params) def create_api(client, name=None, description=None, endpoint_type=None): - return client.create_rest_api(name="ansible-temp-api", description=description, endpointConfiguration={'types': [endpoint_type]}) + return client.create_rest_api( + name="ansible-temp-api", description=description, endpointConfiguration={"types": [endpoint_type]} + ) @AWSRetry.jittered_backoff(**retry_params) @@ -333,32 +336,32 @@ def configure_api(client, api_id, api_data=None, mode="overwrite"): @AWSRetry.jittered_backoff(**retry_params) def create_deployment(client, rest_api_id, **params): - canary_settings = params.get('stage_canary_settings') + canary_settings = params.get("stage_canary_settings") if canary_settings and len(canary_settings) > 0: result = client.create_deployment( restApiId=rest_api_id, - stageName=params.get('stage'), - description=params.get('deploy_desc'), - cacheClusterEnabled=params.get('cache_enabled'), - cacheClusterSize=params.get('cache_size'), - variables=params.get('stage_variables'), + stageName=params.get("stage"), + description=params.get("deploy_desc"), + cacheClusterEnabled=params.get("cache_enabled"), + cacheClusterSize=params.get("cache_size"), + variables=params.get("stage_variables"), canarySettings=canary_settings, - tracingEnabled=params.get('tracing_enabled') + tracingEnabled=params.get("tracing_enabled"), ) else: result = client.create_deployment( restApiId=rest_api_id, - stageName=params.get('stage'), - description=params.get('deploy_desc'), - cacheClusterEnabled=params.get('cache_enabled'), - cacheClusterSize=params.get('cache_size'), - variables=params.get('stage_variables'), - tracingEnabled=params.get('tracing_enabled') + stageName=params.get("stage"), + description=params.get("deploy_desc"), + cacheClusterEnabled=params.get("cache_enabled"), + cacheClusterSize=params.get("cache_size"), + variables=params.get("stage_variables"), + tracingEnabled=params.get("tracing_enabled"), ) return result -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/api_gateway_domain.py b/plugins/modules/api_gateway_domain.py index a1afeaec95f..10a1ca1f2f7 100644 --- a/plugins/modules/api_gateway_domain.py +++ b/plugins/modules/api_gateway_domain.py @@ -129,12 +129,12 @@ def get_domain(module, client): - domain_name = module.params.get('domain_name') + domain_name = module.params.get("domain_name") result = {} try: - result['domain'] = get_domain_name(client, domain_name) - result['path_mappings'] = get_domain_mappings(client, domain_name) - except is_boto3_error_code('NotFoundException'): + result["domain"] = get_domain_name(client, domain_name) + result["path_mappings"] = get_domain_mappings(client, domain_name) + except is_boto3_error_code("NotFoundException"): return None except (ClientError, BotoCoreError, EndpointConnectionError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="getting API GW domain") @@ -142,28 +142,28 @@ def get_domain(module, client): def create_domain(module, client): - path_mappings = module.params.get('domain_mappings', []) - domain_name = module.params.get('domain_name') - result = {'domain': {}, 'path_mappings': []} + path_mappings = module.params.get("domain_mappings", []) + domain_name = module.params.get("domain_name") + result = {"domain": {}, "path_mappings": []} try: - result['domain'] = create_domain_name( + result["domain"] = create_domain_name( module, client, domain_name, - module.params.get('certificate_arn'), - module.params.get('endpoint_type'), - module.params.get('security_policy') + module.params.get("certificate_arn"), + module.params.get("endpoint_type"), + module.params.get("security_policy"), ) for mapping in path_mappings: - base_path = mapping.get('base_path', '') - rest_api_id = mapping.get('rest_api_id') - stage = mapping.get('stage') + base_path = mapping.get("base_path", "") + rest_api_id = mapping.get("rest_api_id") + stage = mapping.get("stage") if rest_api_id is None or stage is None: - module.fail_json('Every domain mapping needs a rest_api_id and stage name') + module.fail_json("Every domain mapping needs a rest_api_id and stage name") - result['path_mappings'].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage)) + result["path_mappings"].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage)) except (ClientError, BotoCoreError, EndpointConnectionError) as e: module.fail_json_aws(e, msg="creating API GW domain") @@ -171,54 +171,56 @@ def create_domain(module, client): def update_domain(module, client, existing_domain): - domain_name = module.params.get('domain_name') + domain_name = module.params.get("domain_name") result = existing_domain - result['updated'] = False + result["updated"] = False - domain = existing_domain.get('domain') + domain = existing_domain.get("domain") # Compare only relevant set of domain arguments. # As get_domain_name gathers all kind of state information that can't be set anyways. # Also this module doesn't support custom TLS cert setup params as they are kind of deprecated already and would increase complexity. existing_domain_settings = { - 'certificate_arn': domain.get('certificate_arn'), - 'security_policy': domain.get('security_policy'), - 'endpoint_type': domain.get('endpoint_configuration').get('types')[0] + "certificate_arn": domain.get("certificate_arn"), + "security_policy": domain.get("security_policy"), + "endpoint_type": domain.get("endpoint_configuration").get("types")[0], } specified_domain_settings = { - 'certificate_arn': module.params.get('certificate_arn'), - 'security_policy': module.params.get('security_policy'), - 'endpoint_type': module.params.get('endpoint_type') + "certificate_arn": module.params.get("certificate_arn"), + "security_policy": module.params.get("security_policy"), + "endpoint_type": module.params.get("endpoint_type"), } if specified_domain_settings != existing_domain_settings: try: - result['domain'] = update_domain_name(client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings)) - result['updated'] = True + result["domain"] = update_domain_name( + client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings) + ) + result["updated"] = True except (ClientError, BotoCoreError, EndpointConnectionError) as e: module.fail_json_aws(e, msg="updating API GW domain") - existing_mappings = copy.deepcopy(existing_domain.get('path_mappings', [])) + existing_mappings = copy.deepcopy(existing_domain.get("path_mappings", [])) # Cleanout `base_path: "(none)"` elements from dicts as those won't match with specified mappings for mapping in existing_mappings: - if mapping.get('base_path', 'missing') == '(none)': - mapping.pop('base_path') + if mapping.get("base_path", "missing") == "(none)": + mapping.pop("base_path") - specified_mappings = copy.deepcopy(module.params.get('domain_mappings', [])) + specified_mappings = copy.deepcopy(module.params.get("domain_mappings", [])) # Cleanout `base_path: ""` elements from dicts as those won't match with existing mappings for mapping in specified_mappings: - if mapping.get('base_path', 'missing') == '': - mapping.pop('base_path') + if mapping.get("base_path", "missing") == "": + mapping.pop("base_path") if specified_mappings != existing_mappings: try: # When lists missmatch delete all existing mappings before adding new ones as specified - for mapping in existing_domain.get('path_mappings', []): - delete_domain_mapping(client, domain_name, mapping['base_path']) - for mapping in module.params.get('domain_mappings', []): - result['path_mappings'] = add_domain_mapping( - client, domain_name, mapping.get('base_path', ''), mapping.get('rest_api_id'), mapping.get('stage') + for mapping in existing_domain.get("path_mappings", []): + delete_domain_mapping(client, domain_name, mapping["base_path"]) + for mapping in module.params.get("domain_mappings", []): + result["path_mappings"] = add_domain_mapping( + client, domain_name, mapping.get("base_path", ""), mapping.get("rest_api_id"), mapping.get("stage") ) - result['updated'] = True + result["updated"] = True except (ClientError, BotoCoreError, EndpointConnectionError) as e: module.fail_json_aws(e, msg="updating API GW domain mapping") @@ -226,7 +228,7 @@ def update_domain(module, client, existing_domain): def delete_domain(module, client): - domain_name = module.params.get('domain_name') + domain_name = module.params.get("domain_name") try: result = delete_domain_name(client, domain_name) except (ClientError, BotoCoreError, EndpointConnectionError) as e: @@ -244,19 +246,19 @@ def get_domain_name(client, domain_name): @AWSRetry.jittered_backoff(**retry_params) def get_domain_mappings(client, domain_name): - return client.get_base_path_mappings(domainName=domain_name, limit=200).get('items', []) + return client.get_base_path_mappings(domainName=domain_name, limit=200).get("items", []) @AWSRetry.jittered_backoff(**retry_params) def create_domain_name(module, client, domain_name, certificate_arn, endpoint_type, security_policy): - endpoint_configuration = {'types': [endpoint_type]} + endpoint_configuration = {"types": [endpoint_type]} - if endpoint_type == 'EDGE': + if endpoint_type == "EDGE": return client.create_domain_name( domainName=domain_name, certificateArn=certificate_arn, endpointConfiguration=endpoint_configuration, - securityPolicy=security_policy + securityPolicy=security_policy, ) else: # Use regionalCertificateArn for regional domain deploys @@ -264,13 +266,15 @@ def create_domain_name(module, client, domain_name, certificate_arn, endpoint_ty domainName=domain_name, regionalCertificateArn=certificate_arn, endpointConfiguration=endpoint_configuration, - securityPolicy=security_policy + securityPolicy=security_policy, ) @AWSRetry.jittered_backoff(**retry_params) def add_domain_mapping(client, domain_name, base_path, rest_api_id, stage): - return client.create_base_path_mapping(domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage) + return client.create_base_path_mapping( + domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage + ) @AWSRetry.jittered_backoff(**retry_params) @@ -298,29 +302,29 @@ def delete_domain_mapping(client, domain_name, base_path): def main(): argument_spec = dict( - domain_name=dict(type='str', required=True), - certificate_arn=dict(type='str', required=True), - security_policy=dict(type='str', default='TLS_1_2', choices=['TLS_1_0', 'TLS_1_2']), - endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE']), - domain_mappings=dict(type='list', required=True, elements='dict'), - state=dict(type='str', default='present', choices=['present', 'absent']) + domain_name=dict(type="str", required=True), + certificate_arn=dict(type="str", required=True), + security_policy=dict(type="str", default="TLS_1_2", choices=["TLS_1_0", "TLS_1_2"]), + endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]), + domain_mappings=dict(type="list", required=True, elements="dict"), + state=dict(type="str", default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=False + supports_check_mode=False, ) - client = module.client('apigateway') + client = module.client("apigateway") - state = module.params.get('state') + state = module.params.get("state") changed = False if state == "present": existing_domain = get_domain(module, client) if existing_domain is not None: result = update_domain(module, client, existing_domain) - changed = result['updated'] + changed = result["updated"] else: result = create_domain(module, client) changed = True @@ -331,10 +335,10 @@ def main(): exit_args = {"changed": changed} if result is not None: - exit_args['response'] = result + exit_args["response"] = result module.exit_json(**exit_args) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/application_autoscaling_policy.py b/plugins/modules/application_autoscaling_policy.py index 1b8669d84c8..8bbd91728a6 100644 --- a/plugins/modules/application_autoscaling_policy.py +++ b/plugins/modules/application_autoscaling_policy.py @@ -297,13 +297,13 @@ # Merge the results of the scalable target creation and policy deletion/creation # There's no risk in overriding values since mutual keys have the same values in our case def merge_results(scalable_target_result, policy_result): - if scalable_target_result['changed'] or policy_result['changed']: + if scalable_target_result["changed"] or policy_result["changed"]: changed = True else: changed = False - merged_response = scalable_target_result['response'].copy() - merged_response.update(policy_result['response']) + merged_response = scalable_target_result["response"].copy() + merged_response.update(policy_result["response"]) return {"changed": changed, "response": merged_response} @@ -312,22 +312,22 @@ def delete_scaling_policy(connection, module): changed = False try: scaling_policy = connection.describe_scaling_policies( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyNames=[module.params.get('policy_name')], - MaxResults=1 + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyNames=[module.params.get("policy_name")], + MaxResults=1, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scaling policies") - if scaling_policy['ScalingPolicies']: + if scaling_policy["ScalingPolicies"]: try: connection.delete_scaling_policy( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyName=module.params.get('policy_name'), + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyName=module.params.get("policy_name"), ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -341,11 +341,11 @@ def create_scalable_target(connection, module): try: scalable_targets = connection.describe_scalable_targets( - ServiceNamespace=module.params.get('service_namespace'), + ServiceNamespace=module.params.get("service_namespace"), ResourceIds=[ - module.params.get('resource_id'), + module.params.get("resource_id"), ], - ScalableDimension=module.params.get('scalable_dimension') + ScalableDimension=module.params.get("scalable_dimension"), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scalable targets") @@ -353,41 +353,38 @@ def create_scalable_target(connection, module): # Scalable target registration will occur if: # 1. There is no scalable target registered for this service # 2. A scalable target exists, different min/max values are defined and override is set to "yes" - if ( - not scalable_targets['ScalableTargets'] - or ( - module.params.get('override_task_capacity') - and ( - scalable_targets['ScalableTargets'][0]['MinCapacity'] != module.params.get('minimum_tasks') - or scalable_targets['ScalableTargets'][0]['MaxCapacity'] != module.params.get('maximum_tasks') - ) + if not scalable_targets["ScalableTargets"] or ( + module.params.get("override_task_capacity") + and ( + scalable_targets["ScalableTargets"][0]["MinCapacity"] != module.params.get("minimum_tasks") + or scalable_targets["ScalableTargets"][0]["MaxCapacity"] != module.params.get("maximum_tasks") ) ): changed = True try: connection.register_scalable_target( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - MinCapacity=module.params.get('minimum_tasks'), - MaxCapacity=module.params.get('maximum_tasks') + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + MinCapacity=module.params.get("minimum_tasks"), + MaxCapacity=module.params.get("maximum_tasks"), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to register scalable target") try: response = connection.describe_scalable_targets( - ServiceNamespace=module.params.get('service_namespace'), + ServiceNamespace=module.params.get("service_namespace"), ResourceIds=[ - module.params.get('resource_id'), + module.params.get("resource_id"), ], - ScalableDimension=module.params.get('scalable_dimension') + ScalableDimension=module.params.get("scalable_dimension"), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scalable targets") - if (response['ScalableTargets']): - snaked_response = camel_dict_to_snake_dict(response['ScalableTargets'][0]) + if response["ScalableTargets"]: + snaked_response = camel_dict_to_snake_dict(response["ScalableTargets"][0]) else: snaked_response = {} @@ -397,78 +394,82 @@ def create_scalable_target(connection, module): def create_scaling_policy(connection, module): try: scaling_policy = connection.describe_scaling_policies( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyNames=[module.params.get('policy_name')], - MaxResults=1 + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyNames=[module.params.get("policy_name")], + MaxResults=1, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scaling policies") changed = False - if scaling_policy['ScalingPolicies']: - scaling_policy = scaling_policy['ScalingPolicies'][0] + if scaling_policy["ScalingPolicies"]: + scaling_policy = scaling_policy["ScalingPolicies"][0] # check if the input parameters are equal to what's already configured - for attr in ('PolicyName', - 'ServiceNamespace', - 'ResourceId', - 'ScalableDimension', - 'PolicyType', - 'StepScalingPolicyConfiguration', - 'TargetTrackingScalingPolicyConfiguration'): + for attr in ( + "PolicyName", + "ServiceNamespace", + "ResourceId", + "ScalableDimension", + "PolicyType", + "StepScalingPolicyConfiguration", + "TargetTrackingScalingPolicyConfiguration", + ): if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)): changed = True scaling_policy[attr] = module.params.get(_camel_to_snake(attr)) else: changed = True scaling_policy = { - 'PolicyName': module.params.get('policy_name'), - 'ServiceNamespace': module.params.get('service_namespace'), - 'ResourceId': module.params.get('resource_id'), - 'ScalableDimension': module.params.get('scalable_dimension'), - 'PolicyType': module.params.get('policy_type'), - 'StepScalingPolicyConfiguration': module.params.get('step_scaling_policy_configuration'), - 'TargetTrackingScalingPolicyConfiguration': module.params.get('target_tracking_scaling_policy_configuration') + "PolicyName": module.params.get("policy_name"), + "ServiceNamespace": module.params.get("service_namespace"), + "ResourceId": module.params.get("resource_id"), + "ScalableDimension": module.params.get("scalable_dimension"), + "PolicyType": module.params.get("policy_type"), + "StepScalingPolicyConfiguration": module.params.get("step_scaling_policy_configuration"), + "TargetTrackingScalingPolicyConfiguration": module.params.get( + "target_tracking_scaling_policy_configuration" + ), } if changed: try: - if (module.params.get('step_scaling_policy_configuration')): + if module.params.get("step_scaling_policy_configuration"): connection.put_scaling_policy( - PolicyName=scaling_policy['PolicyName'], - ServiceNamespace=scaling_policy['ServiceNamespace'], - ResourceId=scaling_policy['ResourceId'], - ScalableDimension=scaling_policy['ScalableDimension'], - PolicyType=scaling_policy['PolicyType'], - StepScalingPolicyConfiguration=scaling_policy['StepScalingPolicyConfiguration'] + PolicyName=scaling_policy["PolicyName"], + ServiceNamespace=scaling_policy["ServiceNamespace"], + ResourceId=scaling_policy["ResourceId"], + ScalableDimension=scaling_policy["ScalableDimension"], + PolicyType=scaling_policy["PolicyType"], + StepScalingPolicyConfiguration=scaling_policy["StepScalingPolicyConfiguration"], ) - elif (module.params.get('target_tracking_scaling_policy_configuration')): + elif module.params.get("target_tracking_scaling_policy_configuration"): connection.put_scaling_policy( - PolicyName=scaling_policy['PolicyName'], - ServiceNamespace=scaling_policy['ServiceNamespace'], - ResourceId=scaling_policy['ResourceId'], - ScalableDimension=scaling_policy['ScalableDimension'], - PolicyType=scaling_policy['PolicyType'], - TargetTrackingScalingPolicyConfiguration=scaling_policy['TargetTrackingScalingPolicyConfiguration'] + PolicyName=scaling_policy["PolicyName"], + ServiceNamespace=scaling_policy["ServiceNamespace"], + ResourceId=scaling_policy["ResourceId"], + ScalableDimension=scaling_policy["ScalableDimension"], + PolicyType=scaling_policy["PolicyType"], + TargetTrackingScalingPolicyConfiguration=scaling_policy["TargetTrackingScalingPolicyConfiguration"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create scaling policy") try: response = connection.describe_scaling_policies( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyNames=[module.params.get('policy_name')], - MaxResults=1 + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyNames=[module.params.get("policy_name")], + MaxResults=1, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scaling policies") - if (response['ScalingPolicies']): - snaked_response = camel_dict_to_snake_dict(response['ScalingPolicies'][0]) + if response["ScalingPolicies"]: + snaked_response = camel_dict_to_snake_dict(response["ScalingPolicies"][0]) else: snaked_response = {} @@ -477,52 +478,63 @@ def create_scaling_policy(connection, module): def main(): argument_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - policy_name=dict(type='str', required=True), - service_namespace=dict(type='str', required=True, choices=['appstream', 'dynamodb', 'ec2', 'ecs', 'elasticmapreduce']), - resource_id=dict(type='str', required=True), - scalable_dimension=dict(type='str', - required=True, - choices=['ecs:service:DesiredCount', - 'ec2:spot-fleet-request:TargetCapacity', - 'elasticmapreduce:instancegroup:InstanceCount', - 'appstream:fleet:DesiredCapacity', - 'dynamodb:table:ReadCapacityUnits', - 'dynamodb:table:WriteCapacityUnits', - 'dynamodb:index:ReadCapacityUnits', - 'dynamodb:index:WriteCapacityUnits']), - policy_type=dict(type='str', required=True, choices=['StepScaling', 'TargetTrackingScaling']), - step_scaling_policy_configuration=dict(type='dict'), + state=dict(type="str", required=True, choices=["present", "absent"]), + policy_name=dict(type="str", required=True), + service_namespace=dict( + type="str", required=True, choices=["appstream", "dynamodb", "ec2", "ecs", "elasticmapreduce"] + ), + resource_id=dict(type="str", required=True), + scalable_dimension=dict( + type="str", + required=True, + choices=[ + "ecs:service:DesiredCount", + "ec2:spot-fleet-request:TargetCapacity", + "elasticmapreduce:instancegroup:InstanceCount", + "appstream:fleet:DesiredCapacity", + "dynamodb:table:ReadCapacityUnits", + "dynamodb:table:WriteCapacityUnits", + "dynamodb:index:ReadCapacityUnits", + "dynamodb:index:WriteCapacityUnits", + ], + ), + policy_type=dict(type="str", required=True, choices=["StepScaling", "TargetTrackingScaling"]), + step_scaling_policy_configuration=dict(type="dict"), target_tracking_scaling_policy_configuration=dict( - type='dict', + type="dict", options=dict( - CustomizedMetricSpecification=dict(type='dict'), - DisableScaleIn=dict(type='bool'), - PredefinedMetricSpecification=dict(type='dict'), - ScaleInCooldown=dict(type='int'), - ScaleOutCooldown=dict(type='int'), - TargetValue=dict(type='float'), - ) + CustomizedMetricSpecification=dict(type="dict"), + DisableScaleIn=dict(type="bool"), + PredefinedMetricSpecification=dict(type="dict"), + ScaleInCooldown=dict(type="int"), + ScaleOutCooldown=dict(type="int"), + TargetValue=dict(type="float"), + ), ), - minimum_tasks=dict(type='int'), - maximum_tasks=dict(type='int'), - override_task_capacity=dict(type='bool'), + minimum_tasks=dict(type="int"), + maximum_tasks=dict(type="int"), + override_task_capacity=dict(type="bool"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('application-autoscaling') + connection = module.client("application-autoscaling") # Remove any target_tracking_scaling_policy_configuration suboptions that are None policy_config_options = [ - 'CustomizedMetricSpecification', 'DisableScaleIn', 'PredefinedMetricSpecification', 'ScaleInCooldown', 'ScaleOutCooldown', 'TargetValue' + "CustomizedMetricSpecification", + "DisableScaleIn", + "PredefinedMetricSpecification", + "ScaleInCooldown", + "ScaleOutCooldown", + "TargetValue", ] - if isinstance(module.params['target_tracking_scaling_policy_configuration'], dict): + if isinstance(module.params["target_tracking_scaling_policy_configuration"], dict): for option in policy_config_options: - if module.params['target_tracking_scaling_policy_configuration'][option] is None: - module.params['target_tracking_scaling_policy_configuration'].pop(option) + if module.params["target_tracking_scaling_policy_configuration"][option] is None: + module.params["target_tracking_scaling_policy_configuration"].pop(option) - if module.params.get("state") == 'present': + if module.params.get("state") == "present": # A scalable target must be registered prior to creating a scaling policy scalable_target_result = create_scalable_target(connection, module) policy_result = create_scaling_policy(connection, module) @@ -535,5 +547,5 @@ def main(): module.exit_json(**policy_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/autoscaling_complete_lifecycle_action.py b/plugins/modules/autoscaling_complete_lifecycle_action.py index 2b752c9a4b8..94a8d031fdd 100644 --- a/plugins/modules/autoscaling_complete_lifecycle_action.py +++ b/plugins/modules/autoscaling_complete_lifecycle_action.py @@ -70,26 +70,26 @@ def main(): argument_spec = dict( - asg_name=dict(required=True, type='str'), - lifecycle_hook_name=dict(required=True, type='str'), - lifecycle_action_result=dict(required=True, type='str', choices=['CONTINUE', 'ABANDON']), - instance_id=dict(required=True, type='str') + asg_name=dict(required=True, type="str"), + lifecycle_hook_name=dict(required=True, type="str"), + lifecycle_action_result=dict(required=True, type="str", choices=["CONTINUE", "ABANDON"]), + instance_id=dict(required=True, type="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - asg_name = module.params.get('asg_name') - lifecycle_hook_name = module.params.get('lifecycle_hook_name') - lifecycle_action_result = module.params.get('lifecycle_action_result') - instance_id = module.params.get('instance_id') + asg_name = module.params.get("asg_name") + lifecycle_hook_name = module.params.get("lifecycle_hook_name") + lifecycle_action_result = module.params.get("lifecycle_action_result") + instance_id = module.params.get("instance_id") - autoscaling = module.client('autoscaling') + autoscaling = module.client("autoscaling") try: results = autoscaling.complete_lifecycle_action( LifecycleHookName=lifecycle_hook_name, AutoScalingGroupName=asg_name, LifecycleActionResult=lifecycle_action_result, - InstanceId=instance_id + InstanceId=instance_id, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to completes the lifecycle action") @@ -97,5 +97,5 @@ def main(): module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/autoscaling_instance_refresh.py b/plugins/modules/autoscaling_instance_refresh.py index 7cf82132e64..5b9855d135d 100644 --- a/plugins/modules/autoscaling_instance_refresh.py +++ b/plugins/modules/autoscaling_instance_refresh.py @@ -179,75 +179,75 @@ def start_or_cancel_instance_refresh(conn, module): } """ - asg_state = module.params.get('state') - asg_name = module.params.get('name') - preferences = module.params.get('preferences') + asg_state = module.params.get("state") + asg_name = module.params.get("name") + preferences = module.params.get("preferences") args = {} - args['AutoScalingGroupName'] = asg_name - if asg_state == 'started': - args['Strategy'] = module.params.get('strategy') + args["AutoScalingGroupName"] = asg_name + if asg_state == "started": + args["Strategy"] = module.params.get("strategy") if preferences: - if asg_state == 'cancelled': - module.fail_json(msg='can not pass preferences dict when canceling a refresh') + if asg_state == "cancelled": + module.fail_json(msg="can not pass preferences dict when canceling a refresh") _prefs = scrub_none_parameters(preferences) - args['Preferences'] = snake_dict_to_camel_dict(_prefs, capitalize_first=True) + args["Preferences"] = snake_dict_to_camel_dict(_prefs, capitalize_first=True) cmd_invocations = { - 'cancelled': conn.cancel_instance_refresh, - 'started': conn.start_instance_refresh, + "cancelled": conn.cancel_instance_refresh, + "started": conn.start_instance_refresh, } try: if module.check_mode: - if asg_state == 'started': - ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', '[]') + if asg_state == "started": + ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get( + "InstanceRefreshes", "[]" + ) if ongoing_refresh: - module.exit_json(changed=False, msg='In check_mode - Instance Refresh is already in progress, can not start new instance refresh.') + module.exit_json( + changed=False, + msg="In check_mode - Instance Refresh is already in progress, can not start new instance refresh.", + ) else: - module.exit_json(changed=True, msg='Would have started instance refresh if not in check mode.') - elif asg_state == 'cancelled': - ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', '[]')[0] - if ongoing_refresh.get('Status', '') in ['Cancelling', 'Cancelled']: - module.exit_json(changed=False, msg='In check_mode - Instance Refresh already cancelled or is pending cancellation.') + module.exit_json(changed=True, msg="Would have started instance refresh if not in check mode.") + elif asg_state == "cancelled": + ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get( + "InstanceRefreshes", "[]" + )[0] + if ongoing_refresh.get("Status", "") in ["Cancelling", "Cancelled"]: + module.exit_json( + changed=False, + msg="In check_mode - Instance Refresh already cancelled or is pending cancellation.", + ) elif not ongoing_refresh: - module.exit_json(chaned=False, msg='In check_mode - No active referesh found, nothing to cancel.') + module.exit_json(chaned=False, msg="In check_mode - No active referesh found, nothing to cancel.") else: - module.exit_json(changed=True, msg='Would have cancelled instance refresh if not in check mode.') + module.exit_json(changed=True, msg="Would have cancelled instance refresh if not in check mode.") result = cmd_invocations[asg_state](aws_retry=True, **args) - instance_refreshes = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name, InstanceRefreshIds=[result['InstanceRefreshId']]) - result = dict( - instance_refreshes=camel_dict_to_snake_dict(instance_refreshes['InstanceRefreshes'][0]) + instance_refreshes = conn.describe_instance_refreshes( + AutoScalingGroupName=asg_name, InstanceRefreshIds=[result["InstanceRefreshId"]] ) + result = dict(instance_refreshes=camel_dict_to_snake_dict(instance_refreshes["InstanceRefreshes"][0])) return module.exit_json(**result) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, - msg='Failed to {0} InstanceRefresh'.format( - asg_state.replace('ed', '') - ) - ) + module.fail_json_aws(e, msg="Failed to {0} InstanceRefresh".format(asg_state.replace("ed", ""))) def main(): - argument_spec = dict( state=dict( - type='str', + type="str", required=True, - choices=['started', 'cancelled'], + choices=["started", "cancelled"], ), name=dict(required=True), - strategy=dict( - type='str', - default='Rolling', - required=False - ), + strategy=dict(type="str", default="Rolling", required=False), preferences=dict( - type='dict', + type="dict", required=False, options=dict( - min_healthy_percentage=dict(type='int', default=90), - instance_warmup=dict(type='int'), - ) + min_healthy_percentage=dict(type="int", default=90), + instance_warmup=dict(type="int"), + ), ), ) @@ -256,15 +256,12 @@ def main(): supports_check_mode=True, ) autoscaling = module.client( - 'autoscaling', - retry_decorator=AWSRetry.jittered_backoff( - retries=10, - catch_extra_error_codes=['InstanceRefreshInProgress'] - ) + "autoscaling", + retry_decorator=AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=["InstanceRefreshInProgress"]), ) start_or_cancel_instance_refresh(autoscaling, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/autoscaling_instance_refresh_info.py b/plugins/modules/autoscaling_instance_refresh_info.py index 64581b46829..639940b1b77 100644 --- a/plugins/modules/autoscaling_instance_refresh_info.py +++ b/plugins/modules/autoscaling_instance_refresh_info.py @@ -158,51 +158,51 @@ def find_asg_instance_refreshes(conn, module): ], 'next_token': 'string' } - """ + """ - asg_name = module.params.get('name') - asg_ids = module.params.get('ids') - asg_next_token = module.params.get('next_token') - asg_max_records = module.params.get('max_records') + asg_name = module.params.get("name") + asg_ids = module.params.get("ids") + asg_next_token = module.params.get("next_token") + asg_max_records = module.params.get("max_records") args = {} - args['AutoScalingGroupName'] = asg_name + args["AutoScalingGroupName"] = asg_name if asg_ids: - args['InstanceRefreshIds'] = asg_ids + args["InstanceRefreshIds"] = asg_ids if asg_next_token: - args['NextToken'] = asg_next_token + args["NextToken"] = asg_next_token if asg_max_records: - args['MaxRecords'] = asg_max_records + args["MaxRecords"] = asg_max_records try: instance_refreshes_result = {} response = conn.describe_instance_refreshes(**args) - if 'InstanceRefreshes' in response: + if "InstanceRefreshes" in response: instance_refreshes_dict = dict( - instance_refreshes=response['InstanceRefreshes'], next_token=response.get('next_token', '')) - instance_refreshes_result = camel_dict_to_snake_dict( - instance_refreshes_dict) + instance_refreshes=response["InstanceRefreshes"], next_token=response.get("next_token", "") + ) + instance_refreshes_result = camel_dict_to_snake_dict(instance_refreshes_dict) - while 'NextToken' in response: - args['NextToken'] = response['NextToken'] + while "NextToken" in response: + args["NextToken"] = response["NextToken"] response = conn.describe_instance_refreshes(**args) - if 'InstanceRefreshes' in response: - instance_refreshes_dict = camel_dict_to_snake_dict(dict( - instance_refreshes=response['InstanceRefreshes'], next_token=response.get('next_token', ''))) + if "InstanceRefreshes" in response: + instance_refreshes_dict = camel_dict_to_snake_dict( + dict(instance_refreshes=response["InstanceRefreshes"], next_token=response.get("next_token", "")) + ) instance_refreshes_result.update(instance_refreshes_dict) return module.exit_json(**instance_refreshes_result) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to describe InstanceRefreshes') + module.fail_json_aws(e, msg="Failed to describe InstanceRefreshes") def main(): - argument_spec = dict( - name=dict(required=True, type='str'), - ids=dict(required=False, default=[], elements='str', type='list'), - next_token=dict(required=False, default=None, type='str', no_log=True), - max_records=dict(required=False, type='int'), + name=dict(required=True, type="str"), + ids=dict(required=False, default=[], elements="str", type="list"), + next_token=dict(required=False, default=None, type="str", no_log=True), + max_records=dict(required=False, type="int"), ) module = AnsibleAWSModule( @@ -210,12 +210,9 @@ def main(): supports_check_mode=True, ) - autoscaling = module.client( - 'autoscaling', - retry_decorator=AWSRetry.jittered_backoff(retries=10) - ) + autoscaling = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff(retries=10)) find_asg_instance_refreshes(autoscaling, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/autoscaling_launch_config.py b/plugins/modules/autoscaling_launch_config.py index b21f296ce0e..a3cd600fa70 100644 --- a/plugins/modules/autoscaling_launch_config.py +++ b/plugins/modules/autoscaling_launch_config.py @@ -457,176 +457,214 @@ def create_block_device_meta(module, volume): - if 'snapshot' not in volume and 'ephemeral' not in volume and 'no_device' not in volume: - if 'volume_size' not in volume: - module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume') - if 'snapshot' in volume: - if volume.get('volume_type') == 'io1' and 'iops' not in volume: - module.fail_json(msg='io1 volumes must have an iops value set') - if 'ephemeral' in volume: - if 'snapshot' in volume: - module.fail_json(msg='Cannot set both ephemeral and snapshot') + if "snapshot" not in volume and "ephemeral" not in volume and "no_device" not in volume: + if "volume_size" not in volume: + module.fail_json(msg="Size must be specified when creating a new volume or modifying the root volume") + if "snapshot" in volume: + if volume.get("volume_type") == "io1" and "iops" not in volume: + module.fail_json(msg="io1 volumes must have an iops value set") + if "ephemeral" in volume: + if "snapshot" in volume: + module.fail_json(msg="Cannot set both ephemeral and snapshot") return_object = {} - if 'ephemeral' in volume: - return_object['VirtualName'] = volume.get('ephemeral') + if "ephemeral" in volume: + return_object["VirtualName"] = volume.get("ephemeral") - if 'device_name' in volume: - return_object['DeviceName'] = volume.get('device_name') + if "device_name" in volume: + return_object["DeviceName"] = volume.get("device_name") - if 'no_device' in volume: - return_object['NoDevice'] = volume.get('no_device') + if "no_device" in volume: + return_object["NoDevice"] = volume.get("no_device") - if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'throughput', 'encrypted']): - return_object['Ebs'] = {} + if any( + key in volume + for key in [ + "snapshot", + "volume_size", + "volume_type", + "delete_on_termination", + "iops", + "throughput", + "encrypted", + ] + ): + return_object["Ebs"] = {} - if 'snapshot' in volume: - return_object['Ebs']['SnapshotId'] = volume.get('snapshot') + if "snapshot" in volume: + return_object["Ebs"]["SnapshotId"] = volume.get("snapshot") - if 'volume_size' in volume: - return_object['Ebs']['VolumeSize'] = int(volume.get('volume_size', 0)) + if "volume_size" in volume: + return_object["Ebs"]["VolumeSize"] = int(volume.get("volume_size", 0)) - if 'volume_type' in volume: - return_object['Ebs']['VolumeType'] = volume.get('volume_type') + if "volume_type" in volume: + return_object["Ebs"]["VolumeType"] = volume.get("volume_type") - if 'delete_on_termination' in volume: - return_object['Ebs']['DeleteOnTermination'] = volume.get('delete_on_termination', False) + if "delete_on_termination" in volume: + return_object["Ebs"]["DeleteOnTermination"] = volume.get("delete_on_termination", False) - if 'iops' in volume: - return_object['Ebs']['Iops'] = volume.get('iops') + if "iops" in volume: + return_object["Ebs"]["Iops"] = volume.get("iops") - if 'throughput' in volume: - if volume.get('volume_type') != 'gp3': - module.fail_json(msg='The throughput parameter is supported only for GP3 volumes.') - return_object['Ebs']['Throughput'] = volume.get('throughput') + if "throughput" in volume: + if volume.get("volume_type") != "gp3": + module.fail_json(msg="The throughput parameter is supported only for GP3 volumes.") + return_object["Ebs"]["Throughput"] = volume.get("throughput") - if 'encrypted' in volume: - return_object['Ebs']['Encrypted'] = volume.get('encrypted') + if "encrypted" in volume: + return_object["Ebs"]["Encrypted"] = volume.get("encrypted") return return_object def create_launch_config(connection, module): - name = module.params.get('name') - vpc_id = module.params.get('vpc_id') + name = module.params.get("name") + vpc_id = module.params.get("vpc_id") try: - ec2_connection = module.client('ec2') + ec2_connection = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") try: - security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True) + security_groups = get_ec2_security_group_ids_from_names( + module.params.get("security_groups"), ec2_connection, vpc_id=vpc_id, boto3=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to get Security Group IDs') + module.fail_json_aws(e, msg="Failed to get Security Group IDs") except ValueError as e: module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc()) - user_data = module.params.get('user_data') - user_data_path = module.params.get('user_data_path') - volumes = module.params['volumes'] - instance_monitoring = module.params.get('instance_monitoring') - assign_public_ip = module.params.get('assign_public_ip') - instance_profile_name = module.params.get('instance_profile_name') - ebs_optimized = module.params.get('ebs_optimized') - classic_link_vpc_id = module.params.get('classic_link_vpc_id') - classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups') + user_data = module.params.get("user_data") + user_data_path = module.params.get("user_data_path") + volumes = module.params["volumes"] + instance_monitoring = module.params.get("instance_monitoring") + assign_public_ip = module.params.get("assign_public_ip") + instance_profile_name = module.params.get("instance_profile_name") + ebs_optimized = module.params.get("ebs_optimized") + classic_link_vpc_id = module.params.get("classic_link_vpc_id") + classic_link_vpc_security_groups = module.params.get("classic_link_vpc_security_groups") block_device_mapping = [] - convert_list = ['image_id', 'instance_type', 'instance_type', 'instance_id', 'placement_tenancy', 'key_name', 'kernel_id', 'ramdisk_id', 'spot_price'] - - launch_config = (snake_dict_to_camel_dict(dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list))) + convert_list = [ + "image_id", + "instance_type", + "instance_type", + "instance_id", + "placement_tenancy", + "key_name", + "kernel_id", + "ramdisk_id", + "spot_price", + ] + + launch_config = snake_dict_to_camel_dict( + dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list) + ) if user_data_path: try: - with open(user_data_path, 'r') as user_data_file: + with open(user_data_path, "r") as user_data_file: user_data = user_data_file.read() except IOError as e: module.fail_json(msg="Failed to open file for reading", exception=traceback.format_exc()) if volumes: for volume in volumes: - if 'device_name' not in volume: - module.fail_json(msg='Device name must be set for volume') + if "device_name" not in volume: + module.fail_json(msg="Device name must be set for volume") # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0 to be a signal not to create this volume - if 'volume_size' not in volume or int(volume['volume_size']) > 0: + if "volume_size" not in volume or int(volume["volume_size"]) > 0: block_device_mapping.append(create_block_device_meta(module, volume)) try: - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( + "LaunchConfigurations" + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe launch configuration by name") changed = False result = {} - launch_config['LaunchConfigurationName'] = name + launch_config["LaunchConfigurationName"] = name if security_groups is not None: - launch_config['SecurityGroups'] = security_groups + launch_config["SecurityGroups"] = security_groups if classic_link_vpc_id is not None: - launch_config['ClassicLinkVPCId'] = classic_link_vpc_id + launch_config["ClassicLinkVPCId"] = classic_link_vpc_id if instance_monitoring is not None: - launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring} + launch_config["InstanceMonitoring"] = {"Enabled": instance_monitoring} if classic_link_vpc_security_groups is not None: - launch_config['ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups + launch_config["ClassicLinkVPCSecurityGroups"] = classic_link_vpc_security_groups if block_device_mapping: - launch_config['BlockDeviceMappings'] = block_device_mapping + launch_config["BlockDeviceMappings"] = block_device_mapping if instance_profile_name is not None: - launch_config['IamInstanceProfile'] = instance_profile_name + launch_config["IamInstanceProfile"] = instance_profile_name if assign_public_ip is not None: - launch_config['AssociatePublicIpAddress'] = assign_public_ip + launch_config["AssociatePublicIpAddress"] = assign_public_ip if user_data is not None: - launch_config['UserData'] = user_data + launch_config["UserData"] = user_data if ebs_optimized is not None: - launch_config['EbsOptimized'] = ebs_optimized + launch_config["EbsOptimized"] = ebs_optimized if len(launch_configs) == 0: try: connection.create_launch_configuration(**launch_config) - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( + "LaunchConfigurations" + ) changed = True if launch_configs: launch_config = launch_configs[0] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create launch configuration") - result = (dict((k, v) for k, v in launch_config.items() - if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings'])) + result = dict( + (k, v) + for k, v in launch_config.items() + if k not in ["Connection", "CreatedTime", "InstanceMonitoring", "BlockDeviceMappings"] + ) - result['CreatedTime'] = to_text(launch_config.get('CreatedTime')) + result["CreatedTime"] = to_text(launch_config.get("CreatedTime")) try: - result['InstanceMonitoring'] = module.boolean(launch_config.get('InstanceMonitoring').get('Enabled')) + result["InstanceMonitoring"] = module.boolean(launch_config.get("InstanceMonitoring").get("Enabled")) except AttributeError: - result['InstanceMonitoring'] = False - - result['BlockDeviceMappings'] = [] - - for block_device_mapping in launch_config.get('BlockDeviceMappings', []): - result['BlockDeviceMappings'].append(dict(device_name=block_device_mapping.get('DeviceName'), virtual_name=block_device_mapping.get('VirtualName'))) - if block_device_mapping.get('Ebs') is not None: - result['BlockDeviceMappings'][-1]['ebs'] = dict( - snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'), volume_size=block_device_mapping.get('Ebs').get('VolumeSize')) + result["InstanceMonitoring"] = False + + result["BlockDeviceMappings"] = [] + + for block_device_mapping in launch_config.get("BlockDeviceMappings", []): + result["BlockDeviceMappings"].append( + dict( + device_name=block_device_mapping.get("DeviceName"), virtual_name=block_device_mapping.get("VirtualName") + ) + ) + if block_device_mapping.get("Ebs") is not None: + result["BlockDeviceMappings"][-1]["ebs"] = dict( + snapshot_id=block_device_mapping.get("Ebs").get("SnapshotId"), + volume_size=block_device_mapping.get("Ebs").get("VolumeSize"), + ) if user_data_path: - result['UserData'] = "hidden" # Otherwise, we dump binary to the user's terminal + result["UserData"] = "hidden" # Otherwise, we dump binary to the user's terminal return_object = { - 'Name': result.get('LaunchConfigurationName'), - 'CreatedTime': result.get('CreatedTime'), - 'ImageId': result.get('ImageId'), - 'Arn': result.get('LaunchConfigurationARN'), - 'SecurityGroups': result.get('SecurityGroups'), - 'InstanceType': result.get('InstanceType'), - 'Result': result + "Name": result.get("LaunchConfigurationName"), + "CreatedTime": result.get("CreatedTime"), + "ImageId": result.get("ImageId"), + "Arn": result.get("LaunchConfigurationARN"), + "SecurityGroups": result.get("SecurityGroups"), + "InstanceType": result.get("InstanceType"), + "Result": result, } module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object)) @@ -634,10 +672,14 @@ def create_launch_config(connection, module): def delete_launch_config(connection, module): try: - name = module.params.get('name') - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + name = module.params.get("name") + launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( + "LaunchConfigurations" + ) if launch_configs: - connection.delete_launch_configuration(LaunchConfigurationName=launch_configs[0].get('LaunchConfigurationName')) + connection.delete_launch_configuration( + LaunchConfigurationName=launch_configs[0].get("LaunchConfigurationName") + ) module.exit_json(changed=True) else: module.exit_json(changed=False) @@ -651,42 +693,42 @@ def main(): image_id=dict(), instance_id=dict(), key_name=dict(), - security_groups=dict(default=[], type='list', elements='str'), + security_groups=dict(default=[], type="list", elements="str"), user_data=dict(), - user_data_path=dict(type='path'), + user_data_path=dict(type="path"), kernel_id=dict(), - volumes=dict(type='list', elements='dict'), + volumes=dict(type="list", elements="dict"), instance_type=dict(), - state=dict(default='present', choices=['present', 'absent']), - spot_price=dict(type='float'), + state=dict(default="present", choices=["present", "absent"]), + spot_price=dict(type="float"), ramdisk_id=dict(), instance_profile_name=dict(), - ebs_optimized=dict(default=False, type='bool'), - instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(type='bool'), - classic_link_vpc_security_groups=dict(type='list', elements='str'), + ebs_optimized=dict(default=False, type="bool"), + instance_monitoring=dict(default=False, type="bool"), + assign_public_ip=dict(type="bool"), + classic_link_vpc_security_groups=dict(type="list", elements="str"), classic_link_vpc_id=dict(), vpc_id=dict(), - placement_tenancy=dict(choices=['default', 'dedicated']) + placement_tenancy=dict(choices=["default", "dedicated"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['user_data', 'user_data_path']], + mutually_exclusive=[["user_data", "user_data_path"]], ) try: - connection = module.client('autoscaling') + connection = module.client("autoscaling") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="unable to establish connection") - state = module.params.get('state') + state = module.params.get("state") - if state == 'present': + if state == "present": create_launch_config(connection, module) - elif state == 'absent': + elif state == "absent": delete_launch_config(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/autoscaling_launch_config_find.py b/plugins/modules/autoscaling_launch_config_find.py index 8f3ca14bec3..037c21ed9f9 100644 --- a/plugins/modules/autoscaling_launch_config_find.py +++ b/plugins/modules/autoscaling_launch_config_find.py @@ -141,50 +141,46 @@ def find_launch_configs(client, module): - name_regex = module.params.get('name_regex') - sort_order = module.params.get('sort_order') - limit = module.params.get('limit') + name_regex = module.params.get("name_regex") + sort_order = module.params.get("sort_order") + limit = module.params.get("limit") - paginator = client.get_paginator('describe_launch_configurations') + paginator = client.get_paginator("describe_launch_configurations") - response_iterator = paginator.paginate( - PaginationConfig={ - 'MaxItems': 1000, - 'PageSize': 100 - } - ) + response_iterator = paginator.paginate(PaginationConfig={"MaxItems": 1000, "PageSize": 100}) results = [] for response in response_iterator: - response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']), - response['LaunchConfigurations']) + response["LaunchConfigurations"] = filter( + lambda lc: re.compile(name_regex).match(lc["LaunchConfigurationName"]), response["LaunchConfigurations"] + ) - for lc in response['LaunchConfigurations']: + for lc in response["LaunchConfigurations"]: data = { - 'name': lc['LaunchConfigurationName'], - 'arn': lc['LaunchConfigurationARN'], - 'created_time': lc['CreatedTime'], - 'user_data': lc['UserData'], - 'instance_type': lc['InstanceType'], - 'image_id': lc['ImageId'], - 'ebs_optimized': lc['EbsOptimized'], - 'instance_monitoring': lc['InstanceMonitoring'], - 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'], - 'block_device_mappings': lc['BlockDeviceMappings'], - 'keyname': lc['KeyName'], - 'security_groups': lc['SecurityGroups'], - 'kernel_id': lc['KernelId'], - 'ram_disk_id': lc['RamdiskId'], - 'associate_public_address': lc.get('AssociatePublicIpAddress', False), + "name": lc["LaunchConfigurationName"], + "arn": lc["LaunchConfigurationARN"], + "created_time": lc["CreatedTime"], + "user_data": lc["UserData"], + "instance_type": lc["InstanceType"], + "image_id": lc["ImageId"], + "ebs_optimized": lc["EbsOptimized"], + "instance_monitoring": lc["InstanceMonitoring"], + "classic_link_vpc_security_groups": lc["ClassicLinkVPCSecurityGroups"], + "block_device_mappings": lc["BlockDeviceMappings"], + "keyname": lc["KeyName"], + "security_groups": lc["SecurityGroups"], + "kernel_id": lc["KernelId"], + "ram_disk_id": lc["RamdiskId"], + "associate_public_address": lc.get("AssociatePublicIpAddress", False), } results.append(data) - results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending')) + results.sort(key=lambda e: e["name"], reverse=(sort_order == "descending")) if limit: - results = results[:int(limit)] + results = results[:int(limit)] # fmt: skip module.exit_json(changed=False, results=results) @@ -192,8 +188,8 @@ def find_launch_configs(client, module): def main(): argument_spec = dict( name_regex=dict(required=True), - sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']), - limit=dict(required=False, type='int'), + sort_order=dict(required=False, default="ascending", choices=["ascending", "descending"]), + limit=dict(required=False, type="int"), ) module = AnsibleAWSModule( @@ -201,12 +197,12 @@ def main(): ) try: - client = module.client('autoscaling') + client = module.client("autoscaling") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") find_launch_configs(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/autoscaling_launch_config_info.py b/plugins/modules/autoscaling_launch_config_info.py index 73e8fbdd8da..f5123c2ef00 100644 --- a/plugins/modules/autoscaling_launch_config_info.py +++ b/plugins/modules/autoscaling_launch_config_info.py @@ -159,29 +159,28 @@ def list_launch_configs(connection, module): - launch_config_name = module.params.get("name") - sort = module.params.get('sort') - sort_order = module.params.get('sort_order') - sort_start = module.params.get('sort_start') - sort_end = module.params.get('sort_end') + sort = module.params.get("sort") + sort_order = module.params.get("sort_order") + sort_start = module.params.get("sort_start") + sort_end = module.params.get("sort_end") try: - pg = connection.get_paginator('describe_launch_configurations') + pg = connection.get_paginator("describe_launch_configurations") launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result() except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Failed to list launch configs") snaked_launch_configs = [] - for launch_config in launch_configs['LaunchConfigurations']: + for launch_config in launch_configs["LaunchConfigurations"]: snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config)) for launch_config in snaked_launch_configs: - if 'CreatedTime' in launch_config: - launch_config['CreatedTime'] = str(launch_config['CreatedTime']) + if "CreatedTime" in launch_config: + launch_config["CreatedTime"] = str(launch_config["CreatedTime"]) if sort: - snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending')) + snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == "descending")) if sort and sort_start and sort_end: snaked_launch_configs = snaked_launch_configs[sort_start:sort_end] @@ -195,13 +194,23 @@ def list_launch_configs(connection, module): def main(): argument_spec = dict( - name=dict(required=False, default=[], type='list', elements='str'), - sort=dict(required=False, default=None, - choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']), - sort_order=dict(required=False, default='ascending', - choices=['ascending', 'descending']), - sort_start=dict(required=False, type='int'), - sort_end=dict(required=False, type='int'), + name=dict(required=False, default=[], type="list", elements="str"), + sort=dict( + required=False, + default=None, + choices=[ + "launch_configuration_name", + "image_id", + "created_time", + "instance_type", + "kernel_id", + "ramdisk_id", + "key_name", + ], + ), + sort_order=dict(required=False, default="ascending", choices=["ascending", "descending"]), + sort_start=dict(required=False, type="int"), + sort_end=dict(required=False, type="int"), ) module = AnsibleAWSModule( @@ -210,12 +219,12 @@ def main(): ) try: - connection = module.client('autoscaling') + connection = module.client("autoscaling") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_launch_configs(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/autoscaling_lifecycle_hook.py b/plugins/modules/autoscaling_lifecycle_hook.py index a3b8edb499b..a77fcce0ad0 100644 --- a/plugins/modules/autoscaling_lifecycle_hook.py +++ b/plugins/modules/autoscaling_lifecycle_hook.py @@ -141,56 +141,58 @@ def create_lifecycle_hook(connection, module): - - lch_name = module.params.get('lifecycle_hook_name') - asg_name = module.params.get('autoscaling_group_name') - transition = module.params.get('transition') - role_arn = module.params.get('role_arn') - notification_target_arn = module.params.get('notification_target_arn') - notification_meta_data = module.params.get('notification_meta_data') - heartbeat_timeout = module.params.get('heartbeat_timeout') - default_result = module.params.get('default_result') + lch_name = module.params.get("lifecycle_hook_name") + asg_name = module.params.get("autoscaling_group_name") + transition = module.params.get("transition") + role_arn = module.params.get("role_arn") + notification_target_arn = module.params.get("notification_target_arn") + notification_meta_data = module.params.get("notification_meta_data") + heartbeat_timeout = module.params.get("heartbeat_timeout") + default_result = module.params.get("default_result") return_object = {} - return_object['changed'] = False + return_object["changed"] = False lch_params = { - 'LifecycleHookName': lch_name, - 'AutoScalingGroupName': asg_name, - 'LifecycleTransition': transition + "LifecycleHookName": lch_name, + "AutoScalingGroupName": asg_name, + "LifecycleTransition": transition, } if role_arn: - lch_params['RoleARN'] = role_arn + lch_params["RoleARN"] = role_arn if notification_target_arn: - lch_params['NotificationTargetARN'] = notification_target_arn + lch_params["NotificationTargetARN"] = notification_target_arn if notification_meta_data: - lch_params['NotificationMetadata'] = notification_meta_data + lch_params["NotificationMetadata"] = notification_meta_data if heartbeat_timeout: - lch_params['HeartbeatTimeout'] = heartbeat_timeout + lch_params["HeartbeatTimeout"] = heartbeat_timeout if default_result: - lch_params['DefaultResult'] = default_result + lch_params["DefaultResult"] = default_result try: existing_hook = connection.describe_lifecycle_hooks( AutoScalingGroupName=asg_name, - LifecycleHookNames=[lch_name] - )['LifecycleHooks'] + LifecycleHookNames=[lch_name], + )["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get Lifecycle Hook") if not existing_hook: try: if module.check_mode: - module.exit_json(changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode.") - return_object['changed'] = True + module.exit_json( + changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode." + ) + return_object["changed"] = True connection.put_lifecycle_hook(**lch_params) - return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks'] + return_object["lifecycle_hook_info"] = connection.describe_lifecycle_hooks( + AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name] + )["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create LifecycleHook") @@ -199,11 +201,14 @@ def create_lifecycle_hook(connection, module): if modified: try: if module.check_mode: - module.exit_json(changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode.") - return_object['changed'] = True + module.exit_json( + changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode." + ) + return_object["changed"] = True connection.put_lifecycle_hook(**lch_params) - return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks'] + return_object["lifecycle_hook_info"] = connection.describe_lifecycle_hooks( + AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name] + )["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create LifecycleHook") @@ -227,33 +232,37 @@ def dict_compare(d1, d2): def delete_lifecycle_hook(connection, module): - - lch_name = module.params.get('lifecycle_hook_name') - asg_name = module.params.get('autoscaling_group_name') + lch_name = module.params.get("lifecycle_hook_name") + asg_name = module.params.get("autoscaling_group_name") return_object = {} - return_object['changed'] = False + return_object["changed"] = False try: all_hooks = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name + AutoScalingGroupName=asg_name, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get Lifecycle Hooks") - for hook in all_hooks['LifecycleHooks']: - if hook['LifecycleHookName'] == lch_name: + for hook in all_hooks["LifecycleHooks"]: + if hook["LifecycleHookName"] == lch_name: lch_params = { - 'LifecycleHookName': lch_name, - 'AutoScalingGroupName': asg_name + "LifecycleHookName": lch_name, + "AutoScalingGroupName": asg_name, } try: if module.check_mode: - module.exit_json(changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode.") + module.exit_json( + changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode." + ) connection.delete_lifecycle_hook(**lch_params) - return_object['changed'] = True - return_object['lifecycle_hook_removed'] = {'LifecycleHookName': lch_name, 'AutoScalingGroupName': asg_name} + return_object["changed"] = True + return_object["lifecycle_hook_removed"] = { + "LifecycleHookName": lch_name, + "AutoScalingGroupName": asg_name, + } except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to delete LifecycleHook") else: @@ -264,34 +273,36 @@ def delete_lifecycle_hook(connection, module): def main(): argument_spec = dict( - autoscaling_group_name=dict(required=True, type='str'), - lifecycle_hook_name=dict(required=True, type='str'), - transition=dict(type='str', choices=['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']), - role_arn=dict(type='str'), - notification_target_arn=dict(type='str'), - notification_meta_data=dict(type='str'), - heartbeat_timeout=dict(type='int'), - default_result=dict(default='ABANDON', choices=['ABANDON', 'CONTINUE']), - state=dict(default='present', choices=['present', 'absent']) + autoscaling_group_name=dict(required=True, type="str"), + lifecycle_hook_name=dict(required=True, type="str"), + transition=dict( + type="str", choices=["autoscaling:EC2_INSTANCE_TERMINATING", "autoscaling:EC2_INSTANCE_LAUNCHING"] + ), + role_arn=dict(type="str"), + notification_target_arn=dict(type="str"), + notification_meta_data=dict(type="str"), + heartbeat_timeout=dict(type="int"), + default_result=dict(default="ABANDON", choices=["ABANDON", "CONTINUE"]), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - required_if=[['state', 'present', ['transition']]], + required_if=[["state", "present", ["transition"]]], ) - state = module.params.get('state') + state = module.params.get("state") - connection = module.client('autoscaling') + connection = module.client("autoscaling") changed = False - if state == 'present': + if state == "present": create_lifecycle_hook(connection, module) - elif state == 'absent': + elif state == "absent": delete_lifecycle_hook(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/autoscaling_policy.py b/plugins/modules/autoscaling_policy.py index b628fe7b58f..f76ce74ceb1 100644 --- a/plugins/modules/autoscaling_policy.py +++ b/plugins/modules/autoscaling_policy.py @@ -363,124 +363,132 @@ def build_target_specification(target_tracking_config): - # Initialize an empty dict() for building TargetTrackingConfiguration policies, # which will be returned targetTrackingConfig = dict() - if target_tracking_config.get('target_value'): - targetTrackingConfig['TargetValue'] = target_tracking_config['target_value'] + if target_tracking_config.get("target_value"): + targetTrackingConfig["TargetValue"] = target_tracking_config["target_value"] - if target_tracking_config.get('disable_scalein'): - targetTrackingConfig['DisableScaleIn'] = target_tracking_config['disable_scalein'] + if target_tracking_config.get("disable_scalein"): + targetTrackingConfig["DisableScaleIn"] = target_tracking_config["disable_scalein"] else: # Accounting for boto3 response - targetTrackingConfig['DisableScaleIn'] = False + targetTrackingConfig["DisableScaleIn"] = False - if target_tracking_config['predefined_metric_spec'] is not None: + if target_tracking_config["predefined_metric_spec"] is not None: # Build spec for predefined_metric_spec - targetTrackingConfig['PredefinedMetricSpecification'] = dict() - if target_tracking_config['predefined_metric_spec'].get('predefined_metric_type'): - targetTrackingConfig['PredefinedMetricSpecification']['PredefinedMetricType'] = \ - target_tracking_config['predefined_metric_spec']['predefined_metric_type'] - - if target_tracking_config['predefined_metric_spec'].get('resource_label'): - targetTrackingConfig['PredefinedMetricSpecification']['ResourceLabel'] = \ - target_tracking_config['predefined_metric_spec']['resource_label'] - - elif target_tracking_config['customized_metric_spec'] is not None: + targetTrackingConfig["PredefinedMetricSpecification"] = dict() + if target_tracking_config["predefined_metric_spec"].get("predefined_metric_type"): + targetTrackingConfig["PredefinedMetricSpecification"]["PredefinedMetricType"] = target_tracking_config[ + "predefined_metric_spec" + ]["predefined_metric_type"] + + if target_tracking_config["predefined_metric_spec"].get("resource_label"): + targetTrackingConfig["PredefinedMetricSpecification"]["ResourceLabel"] = target_tracking_config[ + "predefined_metric_spec" + ]["resource_label"] + + elif target_tracking_config["customized_metric_spec"] is not None: # Build spec for customized_metric_spec - targetTrackingConfig['CustomizedMetricSpecification'] = dict() - if target_tracking_config['customized_metric_spec'].get('metric_name'): - targetTrackingConfig['CustomizedMetricSpecification']['MetricName'] = \ - target_tracking_config['customized_metric_spec']['metric_name'] - - if target_tracking_config['customized_metric_spec'].get('namespace'): - targetTrackingConfig['CustomizedMetricSpecification']['Namespace'] = \ - target_tracking_config['customized_metric_spec']['namespace'] - - if target_tracking_config['customized_metric_spec'].get('dimensions'): - targetTrackingConfig['CustomizedMetricSpecification']['Dimensions'] = \ - target_tracking_config['customized_metric_spec']['dimensions'] - - if target_tracking_config['customized_metric_spec'].get('statistic'): - targetTrackingConfig['CustomizedMetricSpecification']['Statistic'] = \ - target_tracking_config['customized_metric_spec']['statistic'] - - if target_tracking_config['customized_metric_spec'].get('unit'): - targetTrackingConfig['CustomizedMetricSpecification']['Unit'] = \ - target_tracking_config['customized_metric_spec']['unit'] + targetTrackingConfig["CustomizedMetricSpecification"] = dict() + if target_tracking_config["customized_metric_spec"].get("metric_name"): + targetTrackingConfig["CustomizedMetricSpecification"]["MetricName"] = target_tracking_config[ + "customized_metric_spec" + ]["metric_name"] + + if target_tracking_config["customized_metric_spec"].get("namespace"): + targetTrackingConfig["CustomizedMetricSpecification"]["Namespace"] = target_tracking_config[ + "customized_metric_spec" + ]["namespace"] + + if target_tracking_config["customized_metric_spec"].get("dimensions"): + targetTrackingConfig["CustomizedMetricSpecification"]["Dimensions"] = target_tracking_config[ + "customized_metric_spec" + ]["dimensions"] + + if target_tracking_config["customized_metric_spec"].get("statistic"): + targetTrackingConfig["CustomizedMetricSpecification"]["Statistic"] = target_tracking_config[ + "customized_metric_spec" + ]["statistic"] + + if target_tracking_config["customized_metric_spec"].get("unit"): + targetTrackingConfig["CustomizedMetricSpecification"]["Unit"] = target_tracking_config[ + "customized_metric_spec" + ]["unit"] return targetTrackingConfig def create_scaling_policy(connection, module): changed = False - asg_name = module.params['asg_name'] - policy_type = module.params['policy_type'] - policy_name = module.params['name'] - - if policy_type == 'TargetTrackingScaling': - params = dict(PolicyName=policy_name, - PolicyType=policy_type, - AutoScalingGroupName=asg_name) + asg_name = module.params["asg_name"] + policy_type = module.params["policy_type"] + policy_name = module.params["name"] + + if policy_type == "TargetTrackingScaling": + params = dict(PolicyName=policy_name, PolicyType=policy_type, AutoScalingGroupName=asg_name) else: - params = dict(PolicyName=policy_name, - PolicyType=policy_type, - AutoScalingGroupName=asg_name, - AdjustmentType=module.params['adjustment_type']) + params = dict( + PolicyName=policy_name, + PolicyType=policy_type, + AutoScalingGroupName=asg_name, + AdjustmentType=module.params["adjustment_type"], + ) # min_adjustment_step attribute is only relevant if the adjustment_type # is set to percentage change in capacity, so it is a special case - if module.params['adjustment_type'] == 'PercentChangeInCapacity': - if module.params['min_adjustment_step']: - params['MinAdjustmentMagnitude'] = module.params['min_adjustment_step'] + if module.params["adjustment_type"] == "PercentChangeInCapacity": + if module.params["min_adjustment_step"]: + params["MinAdjustmentMagnitude"] = module.params["min_adjustment_step"] - if policy_type == 'SimpleScaling': + if policy_type == "SimpleScaling": # can't use required_if because it doesn't allow multiple criteria - # it's only required if policy is SimpleScaling and state is present - if not module.params['scaling_adjustment']: - module.fail_json(msg='scaling_adjustment is required when policy_type is SimpleScaling ' - 'and state is present') - params['ScalingAdjustment'] = module.params['scaling_adjustment'] - if module.params['cooldown']: - params['Cooldown'] = module.params['cooldown'] - - elif policy_type == 'StepScaling': - if not module.params['step_adjustments']: - module.fail_json(msg='step_adjustments is required when policy_type is StepScaling' - 'and state is present') - params['StepAdjustments'] = [] - for step_adjustment in module.params['step_adjustments']: - step_adjust_params = dict( - ScalingAdjustment=step_adjustment['scaling_adjustment']) - if step_adjustment.get('lower_bound'): - step_adjust_params['MetricIntervalLowerBound'] = step_adjustment['lower_bound'] - if step_adjustment.get('upper_bound'): - step_adjust_params['MetricIntervalUpperBound'] = step_adjustment['upper_bound'] - params['StepAdjustments'].append(step_adjust_params) - if module.params['metric_aggregation']: - params['MetricAggregationType'] = module.params['metric_aggregation'] - if module.params['estimated_instance_warmup']: - params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup'] - - elif policy_type == 'TargetTrackingScaling': - if not module.params['target_tracking_config']: - module.fail_json(msg='target_tracking_config is required when policy_type is ' - 'TargetTrackingScaling and state is present') + if not module.params["scaling_adjustment"]: + module.fail_json( + msg="scaling_adjustment is required when policy_type is SimpleScaling " "and state is present" + ) + params["ScalingAdjustment"] = module.params["scaling_adjustment"] + if module.params["cooldown"]: + params["Cooldown"] = module.params["cooldown"] + + elif policy_type == "StepScaling": + if not module.params["step_adjustments"]: + module.fail_json(msg="step_adjustments is required when policy_type is StepScaling" "and state is present") + params["StepAdjustments"] = [] + for step_adjustment in module.params["step_adjustments"]: + step_adjust_params = dict(ScalingAdjustment=step_adjustment["scaling_adjustment"]) + if step_adjustment.get("lower_bound"): + step_adjust_params["MetricIntervalLowerBound"] = step_adjustment["lower_bound"] + if step_adjustment.get("upper_bound"): + step_adjust_params["MetricIntervalUpperBound"] = step_adjustment["upper_bound"] + params["StepAdjustments"].append(step_adjust_params) + if module.params["metric_aggregation"]: + params["MetricAggregationType"] = module.params["metric_aggregation"] + if module.params["estimated_instance_warmup"]: + params["EstimatedInstanceWarmup"] = module.params["estimated_instance_warmup"] + + elif policy_type == "TargetTrackingScaling": + if not module.params["target_tracking_config"]: + module.fail_json( + msg="target_tracking_config is required when policy_type is " + "TargetTrackingScaling and state is present" + ) else: - params['TargetTrackingConfiguration'] = build_target_specification(module.params.get('target_tracking_config')) - if module.params['estimated_instance_warmup']: - params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup'] + params["TargetTrackingConfiguration"] = build_target_specification( + module.params.get("target_tracking_config") + ) + if module.params["estimated_instance_warmup"]: + params["EstimatedInstanceWarmup"] = module.params["estimated_instance_warmup"] # Ensure idempotency with policies try: - policies = connection.describe_policies(aws_retry=True, - AutoScalingGroupName=asg_name, - PolicyNames=[policy_name])['ScalingPolicies'] + policies = connection.describe_policies( + aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] + )["ScalingPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) before = after = {} if not policies: @@ -500,41 +508,39 @@ def create_scaling_policy(connection, module): module.fail_json_aws(e, msg="Failed to create autoscaling policy") try: - policies = connection.describe_policies(aws_retry=True, - AutoScalingGroupName=asg_name, - PolicyNames=[policy_name])['ScalingPolicies'] + policies = connection.describe_policies( + aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] + )["ScalingPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) policy = camel_dict_to_snake_dict(policies[0]) # Backward compatible return values - policy['arn'] = policy['policy_arn'] - policy['as_name'] = policy['auto_scaling_group_name'] - policy['name'] = policy['policy_name'] + policy["arn"] = policy["policy_arn"] + policy["as_name"] = policy["auto_scaling_group_name"] + policy["name"] = policy["policy_name"] if before and after: - module.exit_json(changed=changed, diff=dict( - before=before, after=after), **policy) + module.exit_json(changed=changed, diff=dict(before=before, after=after), **policy) else: module.exit_json(changed=changed, **policy) def delete_scaling_policy(connection, module): - policy_name = module.params.get('name') + policy_name = module.params.get("name") try: - policy = connection.describe_policies( - aws_retry=True, PolicyNames=[policy_name]) + policy = connection.describe_policies(aws_retry=True, PolicyNames=[policy_name]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) - if policy['ScalingPolicies']: + if policy["ScalingPolicies"]: try: - connection.delete_policy(aws_retry=True, - AutoScalingGroupName=policy['ScalingPolicies'][0]['AutoScalingGroupName'], - PolicyName=policy_name) + connection.delete_policy( + aws_retry=True, + AutoScalingGroupName=policy["ScalingPolicies"][0]["AutoScalingGroupName"], + PolicyName=policy_name, + ) module.exit_json(changed=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to delete autoscaling policy") @@ -544,65 +550,62 @@ def delete_scaling_policy(connection, module): def main(): step_adjustment_spec = dict( - lower_bound=dict(type='int'), - upper_bound=dict(type='int'), - scaling_adjustment=dict(type='int', required=True) + lower_bound=dict(type="int"), upper_bound=dict(type="int"), scaling_adjustment=dict(type="int", required=True) ) predefined_metric_spec = dict( - predefined_metric_type=dict(type='str', choices=['ASGAverageCPUUtilization', - 'ASGAverageNetworkIn', - 'ASGAverageNetworkOut', - 'ALBRequestCountPerTarget'], required=True), - resource_label=dict(type='str') + predefined_metric_type=dict( + type="str", + choices=[ + "ASGAverageCPUUtilization", + "ASGAverageNetworkIn", + "ASGAverageNetworkOut", + "ALBRequestCountPerTarget", + ], + required=True, + ), + resource_label=dict(type="str"), ) customized_metric_spec = dict( - metric_name=dict(type='str', required=True), - namespace=dict(type='str', required=True), - statistic=dict(type='str', required=True, choices=['Average', 'Minimum', 'Maximum', 'SampleCount', 'Sum']), - dimensions=dict(type='list', elements='dict'), - unit=dict(type='str') + metric_name=dict(type="str", required=True), + namespace=dict(type="str", required=True), + statistic=dict(type="str", required=True, choices=["Average", "Minimum", "Maximum", "SampleCount", "Sum"]), + dimensions=dict(type="list", elements="dict"), + unit=dict(type="str"), ) target_tracking_spec = dict( - disable_scalein=dict(type='bool'), - target_value=dict(type='float', required=True), - predefined_metric_spec=dict(type='dict', - options=predefined_metric_spec), - customized_metric_spec=dict(type='dict', - options=customized_metric_spec) + disable_scalein=dict(type="bool"), + target_value=dict(type="float", required=True), + predefined_metric_spec=dict(type="dict", options=predefined_metric_spec), + customized_metric_spec=dict(type="dict", options=customized_metric_spec), ) argument_spec = dict( name=dict(required=True), - adjustment_type=dict(choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']), + adjustment_type=dict(choices=["ChangeInCapacity", "ExactCapacity", "PercentChangeInCapacity"]), asg_name=dict(), - scaling_adjustment=dict(type='int'), - min_adjustment_step=dict(type='int'), - cooldown=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - metric_aggregation=dict(default='Average', choices=[ - 'Minimum', 'Maximum', 'Average']), - policy_type=dict(default='SimpleScaling', choices=[ - 'SimpleScaling', 'StepScaling', 'TargetTrackingScaling']), - target_tracking_config=dict(type='dict', options=target_tracking_spec), - step_adjustments=dict( - type='list', options=step_adjustment_spec, elements='dict'), - estimated_instance_warmup=dict(type='int') + scaling_adjustment=dict(type="int"), + min_adjustment_step=dict(type="int"), + cooldown=dict(type="int"), + state=dict(default="present", choices=["present", "absent"]), + metric_aggregation=dict(default="Average", choices=["Minimum", "Maximum", "Average"]), + policy_type=dict(default="SimpleScaling", choices=["SimpleScaling", "StepScaling", "TargetTrackingScaling"]), + target_tracking_config=dict(type="dict", options=target_tracking_spec), + step_adjustments=dict(type="list", options=step_adjustment_spec, elements="dict"), + estimated_instance_warmup=dict(type="int"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['asg_name']]]) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[["state", "present", ["asg_name"]]]) - connection = module.client( - 'autoscaling', retry_decorator=AWSRetry.jittered_backoff()) - state = module.params.get('state') + connection = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff()) + state = module.params.get("state") - if state == 'present': + if state == "present": create_scaling_policy(connection, module) - elif state == 'absent': + elif state == "absent": delete_scaling_policy(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/autoscaling_scheduled_action.py b/plugins/modules/autoscaling_scheduled_action.py index bf0d4bcc44f..9bfb70b8330 100644 --- a/plugins/modules/autoscaling_scheduled_action.py +++ b/plugins/modules/autoscaling_scheduled_action.py @@ -171,29 +171,29 @@ def format_request(): params = dict( - AutoScalingGroupName=module.params.get('autoscaling_group_name'), - ScheduledActionName=module.params.get('scheduled_action_name'), - Recurrence=module.params.get('recurrence') + AutoScalingGroupName=module.params.get("autoscaling_group_name"), + ScheduledActionName=module.params.get("scheduled_action_name"), + Recurrence=module.params.get("recurrence"), ) # Some of these params are optional - if module.params.get('desired_capacity') is not None: - params['DesiredCapacity'] = module.params.get('desired_capacity') + if module.params.get("desired_capacity") is not None: + params["DesiredCapacity"] = module.params.get("desired_capacity") - if module.params.get('min_size') is not None: - params['MinSize'] = module.params.get('min_size') + if module.params.get("min_size") is not None: + params["MinSize"] = module.params.get("min_size") - if module.params.get('max_size') is not None: - params['MaxSize'] = module.params.get('max_size') + if module.params.get("max_size") is not None: + params["MaxSize"] = module.params.get("max_size") - if module.params.get('time_zone') is not None: - params['TimeZone'] = module.params.get('time_zone') + if module.params.get("time_zone") is not None: + params["TimeZone"] = module.params.get("time_zone") - if module.params.get('start_time') is not None: - params['StartTime'] = module.params.get('start_time') + if module.params.get("start_time") is not None: + params["StartTime"] = module.params.get("start_time") - if module.params.get('end_time') is not None: - params['EndTime'] = module.params.get('end_time') + if module.params.get("end_time") is not None: + params["EndTime"] = module.params.get("end_time") return params @@ -206,8 +206,8 @@ def delete_scheduled_action(current_actions): return True params = dict( - AutoScalingGroupName=module.params.get('autoscaling_group_name'), - ScheduledActionName=module.params.get('scheduled_action_name') + AutoScalingGroupName=module.params.get("autoscaling_group_name"), + ScheduledActionName=module.params.get("scheduled_action_name"), ) try: @@ -220,8 +220,8 @@ def delete_scheduled_action(current_actions): def get_scheduled_actions(): params = dict( - AutoScalingGroupName=module.params.get('autoscaling_group_name'), - ScheduledActionNames=[module.params.get('scheduled_action_name')] + AutoScalingGroupName=module.params.get("autoscaling_group_name"), + ScheduledActionNames=[module.params.get("scheduled_action_name")], ) try: @@ -271,55 +271,53 @@ def main(): global client argument_spec = dict( - autoscaling_group_name=dict(required=True, type='str'), - scheduled_action_name=dict(required=True, type='str'), - start_time=dict(default=None, type='str'), - end_time=dict(default=None, type='str'), - time_zone=dict(default=None, type='str'), - recurrence=dict(type='str'), - min_size=dict(default=None, type='int'), - max_size=dict(default=None, type='int'), - desired_capacity=dict(default=None, type='int'), - state=dict(default='present', choices=['present', 'absent']) + autoscaling_group_name=dict(required=True, type="str"), + scheduled_action_name=dict(required=True, type="str"), + start_time=dict(default=None, type="str"), + end_time=dict(default=None, type="str"), + time_zone=dict(default=None, type="str"), + recurrence=dict(type="str"), + min_size=dict(default=None, type="int"), + max_size=dict(default=None, type="int"), + desired_capacity=dict(default=None, type="int"), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[['state', 'present', ['recurrence']]], - supports_check_mode=True + argument_spec=argument_spec, required_if=[["state", "present", ["recurrence"]]], supports_check_mode=True ) if not HAS_DATEUTIL: - module.fail_json(msg='dateutil is required for this module') + module.fail_json(msg="dateutil is required for this module") if not module.botocore_at_least("1.20.24"): - module.fail_json(msg='botocore version >= 1.20.24 is required for this module') + module.fail_json(msg="botocore version >= 1.20.24 is required for this module") - client = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff()) current_actions = get_scheduled_actions() - state = module.params.get('state') + state = module.params.get("state") results = dict() - if state == 'present': + if state == "present": changed = put_scheduled_update_group_action(current_actions) if not module.check_mode: updated_action = get_scheduled_actions()[0] results = dict( - scheduled_action_name=updated_action.get('ScheduledActionName'), - start_time=updated_action.get('StartTime'), - end_time=updated_action.get('EndTime'), - time_zone=updated_action.get('TimeZone'), - recurrence=updated_action.get('Recurrence'), - min_size=updated_action.get('MinSize'), - max_size=updated_action.get('MaxSize'), - desired_capacity=updated_action.get('DesiredCapacity') + scheduled_action_name=updated_action.get("ScheduledActionName"), + start_time=updated_action.get("StartTime"), + end_time=updated_action.get("EndTime"), + time_zone=updated_action.get("TimeZone"), + recurrence=updated_action.get("Recurrence"), + min_size=updated_action.get("MinSize"), + max_size=updated_action.get("MaxSize"), + desired_capacity=updated_action.get("DesiredCapacity"), ) else: changed = delete_scheduled_action(current_actions) - results['changed'] = changed + results["changed"] = changed module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/aws_region_info.py b/plugins/modules/aws_region_info.py index 837e9326552..a268c13b3c8 100644 --- a/plugins/modules/aws_region_info.py +++ b/plugins/modules/aws_region_info.py @@ -70,30 +70,29 @@ def main(): argument_spec = dict( - filters=dict(default={}, type='dict') + filters=dict(default={}, type="dict"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) # Replace filter key underscores with dashes, for compatibility - sanitized_filters = dict(module.params.get('filters')) - for k in module.params.get('filters').keys(): + sanitized_filters = dict(module.params.get("filters")) + for k in module.params.get("filters").keys(): if "_" in k: - sanitized_filters[k.replace('_', '-')] = sanitized_filters[k] + sanitized_filters[k.replace("_", "-")] = sanitized_filters[k] del sanitized_filters[k] try: regions = connection.describe_regions( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Unable to describe regions.") - module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions['Regions']]) + module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions["Regions"]]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/batch_compute_environment.py b/plugins/modules/batch_compute_environment.py index 79123501992..ffc1f19b003 100644 --- a/plugins/modules/batch_compute_environment.py +++ b/plugins/modules/batch_compute_environment.py @@ -242,6 +242,7 @@ # # --------------------------------------------------------------------------------------------------- + def set_api_params(module, module_params): """ Sets module parameters to those expected by the boto3 API. @@ -262,18 +263,19 @@ def validate_params(module): :return: """ - compute_environment_name = module.params['compute_environment_name'] + compute_environment_name = module.params["compute_environment_name"] # validate compute environment name - if not re.search(r'^[\w\_:]+$', compute_environment_name): + if not re.search(r"^[\w\_:]+$", compute_environment_name): module.fail_json( msg="Function compute_environment_name {0} is invalid. Names must contain only alphanumeric characters " - "and underscores.".format(compute_environment_name) + "and underscores.".format(compute_environment_name) ) - if not compute_environment_name.startswith('arn:aws:batch:'): + if not compute_environment_name.startswith("arn:aws:batch:"): if len(compute_environment_name) > 128: - module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit' - .format(compute_environment_name)) + module.fail_json( + msg='compute_environment_name "{0}" exceeds 128 character limit'.format(compute_environment_name) + ) return @@ -284,13 +286,14 @@ def validate_params(module): # # --------------------------------------------------------------------------------------------------- + def get_current_compute_environment(module, client): try: environments = client.describe_compute_environments( - computeEnvironments=[module.params['compute_environment_name']] + computeEnvironments=[module.params["compute_environment_name"]] ) - if len(environments['computeEnvironments']) > 0: - return environments['computeEnvironments'][0] + if len(environments["computeEnvironments"]) > 0: + return environments["computeEnvironments"][0] else: return None except ClientError: @@ -299,42 +302,52 @@ def get_current_compute_environment(module, client): def create_compute_environment(module, client): """ - Adds a Batch compute environment + Adds a Batch compute environment - :param module: - :param client: - :return: - """ + :param module: + :param client: + :return: + """ changed = False # set API parameters - params = ( - 'compute_environment_name', 'type', 'service_role') + params = ("compute_environment_name", "type", "service_role") api_params = set_api_params(module, params) - if module.params['compute_environment_state'] is not None: - api_params['state'] = module.params['compute_environment_state'] - - compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets', - 'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage', - 'spot_iam_fleet_role') + if module.params["compute_environment_state"] is not None: + api_params["state"] = module.params["compute_environment_state"] + + compute_resources_param_list = ( + "minv_cpus", + "maxv_cpus", + "desiredv_cpus", + "instance_types", + "image_id", + "subnets", + "security_group_ids", + "ec2_key_pair", + "instance_role", + "tags", + "bid_percentage", + "spot_iam_fleet_role", + ) compute_resources_params = set_api_params(module, compute_resources_param_list) - if module.params['compute_resource_type'] is not None: - compute_resources_params['type'] = module.params['compute_resource_type'] + if module.params["compute_resource_type"] is not None: + compute_resources_params["type"] = module.params["compute_resource_type"] # if module.params['minv_cpus'] is not None: # compute_resources_params['minvCpus'] = module.params['minv_cpus'] - api_params['computeResources'] = compute_resources_params + api_params["computeResources"] = compute_resources_params try: if not module.check_mode: client.create_compute_environment(**api_params) changed = True except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg='Error creating compute environment') + module.fail_json_aws(e, msg="Error creating compute environment") return changed @@ -351,29 +364,29 @@ def remove_compute_environment(module, client): changed = False # set API parameters - api_params = {'computeEnvironment': module.params['compute_environment_name']} + api_params = {"computeEnvironment": module.params["compute_environment_name"]} try: if not module.check_mode: client.delete_compute_environment(**api_params) changed = True except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg='Error removing compute environment') + module.fail_json_aws(e, msg="Error removing compute environment") return changed def manage_state(module, client): changed = False - current_state = 'absent' - state = module.params['state'] - compute_environment_state = module.params['compute_environment_state'] - compute_environment_name = module.params['compute_environment_name'] - service_role = module.params['service_role'] - minv_cpus = module.params['minv_cpus'] - maxv_cpus = module.params['maxv_cpus'] - desiredv_cpus = module.params['desiredv_cpus'] - action_taken = 'none' - update_env_response = '' + current_state = "absent" + state = module.params["state"] + compute_environment_state = module.params["compute_environment_state"] + compute_environment_name = module.params["compute_environment_name"] + service_role = module.params["service_role"] + minv_cpus = module.params["minv_cpus"] + maxv_cpus = module.params["maxv_cpus"] + desiredv_cpus = module.params["desiredv_cpus"] + action_taken = "none" + update_env_response = "" check_mode = module.check_mode @@ -381,37 +394,40 @@ def manage_state(module, client): current_compute_environment = get_current_compute_environment(module, client) response = current_compute_environment if current_compute_environment: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": updates = False # Update Batch Compute Environment configuration - compute_kwargs = {'computeEnvironment': compute_environment_name} + compute_kwargs = {"computeEnvironment": compute_environment_name} # Update configuration if needed compute_resources = {} - if compute_environment_state and current_compute_environment['state'] != compute_environment_state: - compute_kwargs.update({'state': compute_environment_state}) + if compute_environment_state and current_compute_environment["state"] != compute_environment_state: + compute_kwargs.update({"state": compute_environment_state}) updates = True - if service_role and current_compute_environment['serviceRole'] != service_role: - compute_kwargs.update({'serviceRole': service_role}) + if service_role and current_compute_environment["serviceRole"] != service_role: + compute_kwargs.update({"serviceRole": service_role}) updates = True - if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus: - compute_resources['minvCpus'] = minv_cpus - if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus: - compute_resources['maxvCpus'] = maxv_cpus - if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus: - compute_resources['desiredvCpus'] = desiredv_cpus + if minv_cpus is not None and current_compute_environment["computeResources"]["minvCpus"] != minv_cpus: + compute_resources["minvCpus"] = minv_cpus + if maxv_cpus is not None and current_compute_environment["computeResources"]["maxvCpus"] != maxv_cpus: + compute_resources["maxvCpus"] = maxv_cpus + if ( + desiredv_cpus is not None + and current_compute_environment["computeResources"]["desiredvCpus"] != desiredv_cpus + ): + compute_resources["desiredvCpus"] = desiredv_cpus if len(compute_resources) > 0: - compute_kwargs['computeResources'] = compute_resources + compute_kwargs["computeResources"] = compute_resources updates = True if updates: try: if not check_mode: update_env_response = client.update_compute_environment(**compute_kwargs) if not update_env_response: - module.fail_json(msg='Unable to get compute environment information after creating') + module.fail_json(msg="Unable to get compute environment information after creating") changed = True action_taken = "updated" except (BotoCoreError, ClientError) as e: @@ -421,15 +437,15 @@ def manage_state(module, client): # Create Batch Compute Environment changed = create_compute_environment(module, client) # Describe compute environment - action_taken = 'added' + action_taken = "added" response = get_current_compute_environment(module, client) if not response: - module.fail_json(msg='Unable to get compute environment information after creating') + module.fail_json(msg="Unable to get compute environment information after creating") else: - if current_state == 'present': + if current_state == "present": # remove the compute environment changed = remove_compute_environment(module, client) - action_taken = 'deleted' + action_taken = "deleted" return dict(changed=changed, batch_compute_environment_action=action_taken, response=response) @@ -439,6 +455,7 @@ def manage_state(module, client): # # --------------------------------------------------------------------------------------------------- + def main(): """ Main entry point. @@ -447,39 +464,36 @@ def main(): """ argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), compute_environment_name=dict(required=True), - type=dict(required=True, choices=['MANAGED', 'UNMANAGED']), - compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']), + type=dict(required=True, choices=["MANAGED", "UNMANAGED"]), + compute_environment_state=dict(required=False, default="ENABLED", choices=["ENABLED", "DISABLED"]), service_role=dict(required=True), - compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']), - minv_cpus=dict(type='int', required=True), - maxv_cpus=dict(type='int', required=True), - desiredv_cpus=dict(type='int'), - instance_types=dict(type='list', required=True, elements='str'), + compute_resource_type=dict(required=True, choices=["EC2", "SPOT"]), + minv_cpus=dict(type="int", required=True), + maxv_cpus=dict(type="int", required=True), + desiredv_cpus=dict(type="int"), + instance_types=dict(type="list", required=True, elements="str"), image_id=dict(), - subnets=dict(type='list', required=True, elements='str'), - security_group_ids=dict(type='list', required=True, elements='str'), + subnets=dict(type="list", required=True, elements="str"), + security_group_ids=dict(type="list", required=True, elements="str"), ec2_key_pair=dict(no_log=False), instance_role=dict(required=True), - tags=dict(type='dict'), - bid_percentage=dict(type='int'), + tags=dict(type="dict"), + bid_percentage=dict(type="int"), spot_iam_fleet_role=dict(), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('batch') + client = module.client("batch") validate_params(module) results = manage_state(module, client) - module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags'])) + module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=["Tags"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/batch_job_definition.py b/plugins/modules/batch_job_definition.py index 5eac0cacfe1..9ea5dc8cefa 100644 --- a/plugins/modules/batch_job_definition.py +++ b/plugins/modules/batch_job_definition.py @@ -265,15 +265,15 @@ def validate_params(module, batch_client): # # --------------------------------------------------------------------------------------------------- + def get_current_job_definition(module, batch_client): try: - environments = batch_client.describe_job_definitions( - jobDefinitionName=module.params['job_definition_name'] - ) - if len(environments['jobDefinitions']) > 0: - latest_revision = max(map(lambda d: d['revision'], environments['jobDefinitions'])) - latest_definition = next((x for x in environments['jobDefinitions'] if x['revision'] == latest_revision), - None) + environments = batch_client.describe_job_definitions(jobDefinitionName=module.params["job_definition_name"]) + if len(environments["jobDefinitions"]) > 0: + latest_revision = max(map(lambda d: d["revision"], environments["jobDefinitions"])) + latest_definition = next( + (x for x in environments["jobDefinitions"] if x["revision"] == latest_revision), None + ) return latest_definition return None except ClientError: @@ -282,12 +282,12 @@ def get_current_job_definition(module, batch_client): def create_job_definition(module, batch_client): """ - Adds a Batch job definition + Adds a Batch job definition - :param module: - :param batch_client: - :return: - """ + :param module: + :param batch_client: + :return: + """ changed = False @@ -296,36 +296,48 @@ def create_job_definition(module, batch_client): container_properties_params = set_api_params(module, get_container_property_params()) retry_strategy_params = set_api_params(module, get_retry_strategy_params()) - api_params['retryStrategy'] = retry_strategy_params - api_params['containerProperties'] = container_properties_params + api_params["retryStrategy"] = retry_strategy_params + api_params["containerProperties"] = container_properties_params try: if not module.check_mode: batch_client.register_job_definition(**api_params) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error registering job definition') + module.fail_json_aws(e, msg="Error registering job definition") return changed def get_retry_strategy_params(): - return ('attempts',) + return ("attempts",) def get_container_property_params(): - return ('image', 'vcpus', 'memory', 'command', 'job_role_arn', 'volumes', 'environment', 'mount_points', - 'readonly_root_filesystem', 'privileged', 'ulimits', 'user') + return ( + "image", + "vcpus", + "memory", + "command", + "job_role_arn", + "volumes", + "environment", + "mount_points", + "readonly_root_filesystem", + "privileged", + "ulimits", + "user", + ) def get_base_params(): - return 'job_definition_name', 'type', 'parameters' + return "job_definition_name", "type", "parameters" def get_compute_environment_order_list(module): compute_environment_order_list = [] - for ceo in module.params['compute_environment_order']: - compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment'])) + for ceo in module.params["compute_environment_order"]: + compute_environment_order_list.append(dict(order=ceo["order"], computeEnvironment=ceo["compute_environment"])) return compute_environment_order_list @@ -342,10 +354,10 @@ def remove_job_definition(module, batch_client): try: if not module.check_mode: - batch_client.deregister_job_definition(jobDefinition=module.params['job_definition_arn']) + batch_client.deregister_job_definition(jobDefinition=module.params["job_definition_arn"]) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error removing job definition') + module.fail_json_aws(e, msg="Error removing job definition") return changed @@ -358,12 +370,12 @@ def job_definition_equal(module, current_definition): break for param in get_container_property_params(): - if module.params.get(param) != current_definition.get('containerProperties').get(cc(param)): + if module.params.get(param) != current_definition.get("containerProperties").get(cc(param)): equal = False break for param in get_retry_strategy_params(): - if module.params.get(param) != current_definition.get('retryStrategy').get(cc(param)): + if module.params.get(param) != current_definition.get("retryStrategy").get(cc(param)): equal = False break @@ -372,10 +384,10 @@ def job_definition_equal(module, current_definition): def manage_state(module, batch_client): changed = False - current_state = 'absent' - state = module.params['state'] - job_definition_name = module.params['job_definition_name'] - action_taken = 'none' + current_state = "absent" + state = module.params["state"] + job_definition_name = module.params["job_definition_name"] + action_taken = "none" response = None check_mode = module.check_mode @@ -383,28 +395,28 @@ def manage_state(module, batch_client): # check if the job definition exists current_job_definition = get_current_job_definition(module, batch_client) if current_job_definition: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": # check if definition has changed and register a new version if necessary if not job_definition_equal(module, current_job_definition): create_job_definition(module, batch_client) - action_taken = 'updated with new version' + action_taken = "updated with new version" changed = True else: # Create Job definition changed = create_job_definition(module, batch_client) - action_taken = 'added' + action_taken = "added" response = get_current_job_definition(module, batch_client) if not response: - module.fail_json(msg='Unable to get job definition information after creating/updating') + module.fail_json(msg="Unable to get job definition information after creating/updating") else: - if current_state == 'present': + if current_state == "present": # remove the Job definition changed = remove_job_definition(module, batch_client) - action_taken = 'deregistered' + action_taken = "deregistered" return dict(changed=changed, batch_job_definition_action=action_taken, response=response) @@ -414,6 +426,7 @@ def manage_state(module, batch_client): # # --------------------------------------------------------------------------------------------------- + def main(): """ Main entry point. @@ -422,32 +435,29 @@ def main(): """ argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), job_definition_name=dict(required=True), job_definition_arn=dict(), type=dict(required=True), - parameters=dict(type='dict'), + parameters=dict(type="dict"), image=dict(required=True), - vcpus=dict(type='int', required=True), - memory=dict(type='int', required=True), - command=dict(type='list', default=[], elements='str'), + vcpus=dict(type="int", required=True), + memory=dict(type="int", required=True), + command=dict(type="list", default=[], elements="str"), job_role_arn=dict(), - volumes=dict(type='list', default=[], elements='dict'), - environment=dict(type='list', default=[], elements='dict'), - mount_points=dict(type='list', default=[], elements='dict'), + volumes=dict(type="list", default=[], elements="dict"), + environment=dict(type="list", default=[], elements="dict"), + mount_points=dict(type="list", default=[], elements="dict"), readonly_root_filesystem=dict(), privileged=dict(), - ulimits=dict(type='list', default=[], elements='dict'), + ulimits=dict(type="list", default=[], elements="dict"), user=dict(), - attempts=dict(type='int') + attempts=dict(type="int"), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - batch_client = module.client('batch') + batch_client = module.client("batch") validate_params(module, batch_client) @@ -456,5 +466,5 @@ def main(): module.exit_json(**camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/batch_job_queue.py b/plugins/modules/batch_job_queue.py index f71848bb04a..c9e253d0652 100644 --- a/plugins/modules/batch_job_queue.py +++ b/plugins/modules/batch_job_queue.py @@ -138,50 +138,49 @@ def validate_params(module): # # --------------------------------------------------------------------------------------------------- + def get_current_job_queue(module, client): try: - environments = client.describe_job_queues( - jobQueues=[module.params['job_queue_name']] - ) - return environments['jobQueues'][0] if len(environments['jobQueues']) > 0 else None + environments = client.describe_job_queues(jobQueues=[module.params["job_queue_name"]]) + return environments["jobQueues"][0] if len(environments["jobQueues"]) > 0 else None except ClientError: return None def create_job_queue(module, client): """ - Adds a Batch job queue + Adds a Batch job queue - :param module: - :param client: - :return: - """ + :param module: + :param client: + :return: + """ changed = False # set API parameters - params = ('job_queue_name', 'priority') + params = ("job_queue_name", "priority") api_params = set_api_params(module, params) - if module.params['job_queue_state'] is not None: - api_params['state'] = module.params['job_queue_state'] + if module.params["job_queue_state"] is not None: + api_params["state"] = module.params["job_queue_state"] - api_params['computeEnvironmentOrder'] = get_compute_environment_order_list(module) + api_params["computeEnvironmentOrder"] = get_compute_environment_order_list(module) try: if not module.check_mode: client.create_job_queue(**api_params) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error creating compute environment') + module.fail_json_aws(e, msg="Error creating compute environment") return changed def get_compute_environment_order_list(module): compute_environment_order_list = [] - for ceo in module.params['compute_environment_order']: - compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment'])) + for ceo in module.params["compute_environment_order"]: + compute_environment_order_list.append(dict(order=ceo["order"], computeEnvironment=ceo["compute_environment"])) return compute_environment_order_list @@ -197,25 +196,25 @@ def remove_job_queue(module, client): changed = False # set API parameters - api_params = {'jobQueue': module.params['job_queue_name']} + api_params = {"jobQueue": module.params["job_queue_name"]} try: if not module.check_mode: client.delete_job_queue(**api_params) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error removing job queue') + module.fail_json_aws(e, msg="Error removing job queue") return changed def manage_state(module, client): changed = False - current_state = 'absent' - state = module.params['state'] - job_queue_state = module.params['job_queue_state'] - job_queue_name = module.params['job_queue_name'] - priority = module.params['priority'] - action_taken = 'none' + current_state = "absent" + state = module.params["state"] + job_queue_state = module.params["job_queue_state"] + job_queue_name = module.params["job_queue_name"] + priority = module.params["priority"] + action_taken = "none" response = None check_mode = module.check_mode @@ -223,25 +222,25 @@ def manage_state(module, client): # check if the job queue exists current_job_queue = get_current_job_queue(module, client) if current_job_queue: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": updates = False # Update Batch Job Queue configuration - job_kwargs = {'jobQueue': job_queue_name} + job_kwargs = {"jobQueue": job_queue_name} # Update configuration if needed - if job_queue_state and current_job_queue['state'] != job_queue_state: - job_kwargs.update({'state': job_queue_state}) + if job_queue_state and current_job_queue["state"] != job_queue_state: + job_kwargs.update({"state": job_queue_state}) updates = True - if priority is not None and current_job_queue['priority'] != priority: - job_kwargs.update({'priority': priority}) + if priority is not None and current_job_queue["priority"] != priority: + job_kwargs.update({"priority": priority}) updates = True new_compute_environment_order_list = get_compute_environment_order_list(module) - if new_compute_environment_order_list != current_job_queue['computeEnvironmentOrder']: - job_kwargs['computeEnvironmentOrder'] = new_compute_environment_order_list + if new_compute_environment_order_list != current_job_queue["computeEnvironmentOrder"]: + job_kwargs["computeEnvironmentOrder"] = new_compute_environment_order_list updates = True if updates: @@ -256,17 +255,17 @@ def manage_state(module, client): else: # Create Job Queue changed = create_job_queue(module, client) - action_taken = 'added' + action_taken = "added" # Describe job queue response = get_current_job_queue(module, client) if not response: - module.fail_json(msg='Unable to get job queue information after creating/updating') + module.fail_json(msg="Unable to get job queue information after creating/updating") else: - if current_state == 'present': + if current_state == "present": # remove the Job Queue changed = remove_job_queue(module, client) - action_taken = 'deleted' + action_taken = "deleted" return dict(changed=changed, batch_job_queue_action=action_taken, response=response) @@ -276,6 +275,7 @@ def manage_state(module, client): # # --------------------------------------------------------------------------------------------------- + def main(): """ Main entry point. @@ -284,19 +284,16 @@ def main(): """ argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), job_queue_name=dict(required=True), - job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']), - priority=dict(type='int', required=True), - compute_environment_order=dict(type='list', required=True, elements='dict'), + job_queue_state=dict(required=False, default="ENABLED", choices=["ENABLED", "DISABLED"]), + priority=dict(type="int", required=True), + compute_environment_order=dict(type="list", required=True, elements="dict"), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('batch') + client = module.client("batch") validate_params(module) @@ -305,5 +302,5 @@ def main(): module.exit_json(**camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cloudformation_exports_info.py b/plugins/modules/cloudformation_exports_info.py index 604abfd1436..3c93c6a3459 100644 --- a/plugins/modules/cloudformation_exports_info.py +++ b/plugins/modules/cloudformation_exports_info.py @@ -48,29 +48,26 @@ @AWSRetry.exponential_backoff() def list_exports(cloudformation_client): - '''Get Exports Names and Values and return in dictionary ''' - list_exports_paginator = cloudformation_client.get_paginator('list_exports') - exports = list_exports_paginator.paginate().build_full_result()['Exports'] + """Get Exports Names and Values and return in dictionary""" + list_exports_paginator = cloudformation_client.get_paginator("list_exports") + exports = list_exports_paginator.paginate().build_full_result()["Exports"] export_items = dict() for item in exports: - export_items[item['Name']] = item['Value'] + export_items[item["Name"]] = item["Value"] return export_items def main(): argument_spec = dict() - result = dict( - changed=False, - original_message='' - ) + result = dict(changed=False, original_message="") module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - cloudformation_client = module.client('cloudformation') + cloudformation_client = module.client("cloudformation") try: - result['export_items'] = list_exports(cloudformation_client) + result["export_items"] = list_exports(cloudformation_client) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e) @@ -79,5 +76,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cloudformation_stack_set.py b/plugins/modules/cloudformation_stack_set.py index e15f1c95229..1a8673a909c 100644 --- a/plugins/modules/cloudformation_stack_set.py +++ b/plugins/modules/cloudformation_stack_set.py @@ -321,9 +321,9 @@ def create_stack_set(module, stack_params, cfn): try: cfn.create_stack_set(aws_retry=True, **stack_params) - return await_stack_set_exists(cfn, stack_params['StackSetName']) + return await_stack_set_exists(cfn, stack_params["StackSetName"]) except (ClientError, BotoCoreError) as err: - module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get('StackSetName'))) + module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get("StackSetName"))) def update_stack_set(module, stack_params, cfn): @@ -332,22 +332,29 @@ def update_stack_set(module, stack_params, cfn): # don't need to be updated. try: cfn.update_stack_set(**stack_params) - except is_boto3_error_code('StackSetNotFound') as err: # pylint: disable=duplicate-except + except is_boto3_error_code("StackSetNotFound") as err: # pylint: disable=duplicate-except module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.") - except is_boto3_error_code('StackInstanceNotFound') as err: # pylint: disable=duplicate-except - module.fail_json_aws(err, msg="One or more stack instances were not found for this stack set. Double check " - "the `accounts` and `regions` parameters.") - except is_boto3_error_code('OperationInProgressException') as err: # pylint: disable=duplicate-except + except is_boto3_error_code("StackInstanceNotFound") as err: # pylint: disable=duplicate-except module.fail_json_aws( - err, msg="Another operation is already in progress on this stack set - please try again later. When making " - "multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op errors.") + err, + msg="One or more stack instances were not found for this stack set. Double check " + "the `accounts` and `regions` parameters.", + ) + except is_boto3_error_code("OperationInProgressException") as err: # pylint: disable=duplicate-except + module.fail_json_aws( + err, + msg="Another operation is already in progress on this stack set - please try again later. When making " + "multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op errors.", + ) except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except module.fail_json_aws(err, msg="Could not update stack set.") - if module.params.get('wait'): + if module.params.get("wait"): await_stack_set_operation( - module, cfn, operation_id=stack_params['OperationId'], - stack_set_name=stack_params['StackSetName'], - max_wait=module.params.get('wait_timeout'), + module, + cfn, + operation_id=stack_params["OperationId"], + stack_set_name=stack_params["StackSetName"], + max_wait=module.params.get("wait_timeout"), ) return True @@ -357,20 +364,24 @@ def compare_stack_instances(cfn, stack_set_name, accounts, regions): instance_list = cfn.list_stack_instances( aws_retry=True, StackSetName=stack_set_name, - )['Summaries'] + )["Summaries"] desired_stack_instances = set(itertools.product(accounts, regions)) - existing_stack_instances = set((i['Account'], i['Region']) for i in instance_list) + existing_stack_instances = set((i["Account"], i["Region"]) for i in instance_list) # new stacks, existing stacks, unspecified stacks - return (desired_stack_instances - existing_stack_instances), existing_stack_instances, (existing_stack_instances - desired_stack_instances) + return ( + (desired_stack_instances - existing_stack_instances), + existing_stack_instances, + (existing_stack_instances - desired_stack_instances), + ) @AWSRetry.jittered_backoff(retries=3, delay=4) def stack_set_facts(cfn, stack_set_name): try: - ss = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet'] - ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags']) + ss = cfn.describe_stack_set(StackSetName=stack_set_name)["StackSet"] + ss["Tags"] = boto3_tag_list_to_ansible_dict(ss["Tags"]) return ss - except cfn.exceptions.from_code('StackSetNotFound'): + except cfn.exceptions.from_code("StackSetNotFound"): # Return None if the stack doesn't exist return @@ -381,23 +392,24 @@ def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wai for i in range(max_wait // 15): try: operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id) - if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'): + if operation["StackSetOperation"]["Status"] not in ("RUNNING", "STOPPING"): # Stack set has completed operation break - except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except + except is_boto3_error_code("StackSetNotFound"): # pylint: disable=duplicate-except pass - except is_boto3_error_code('OperationNotFound'): # pylint: disable=duplicate-except + except is_boto3_error_code("OperationNotFound"): # pylint: disable=duplicate-except pass time.sleep(15) - if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'): + if operation and operation["StackSetOperation"]["Status"] not in ("FAILED", "STOPPED"): await_stack_instance_completion( - module, cfn, + module, + cfn, stack_set_name=stack_set_name, # subtract however long we waited already max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()), ) - elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'): + elif operation and operation["StackSetOperation"]["Status"] in ("FAILED", "STOPPED"): pass else: module.warn( @@ -412,84 +424,84 @@ def await_stack_instance_completion(module, cfn, stack_set_name, max_wait): for i in range(max_wait // 15): try: stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name) - to_await = [inst for inst in stack_instances['Summaries'] - if inst['Status'] != 'CURRENT'] + to_await = [inst for inst in stack_instances["Summaries"] if inst["Status"] != "CURRENT"] if not to_await: - return stack_instances['Summaries'] - except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except + return stack_instances["Summaries"] + except is_boto3_error_code("StackSetNotFound"): # pylint: disable=duplicate-except # this means the deletion beat us, or the stack set is not yet propagated pass time.sleep(15) module.warn( "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format( - stack_set_name, ', '.join(s['StackId'] for s in to_await), max_wait + stack_set_name, ", ".join(s["StackId"] for s in to_await), max_wait ) ) def await_stack_set_exists(cfn, stack_set_name): # AWSRetry will retry on `StackSetNotFound` errors for us - ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)['StackSet'] - ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags']) - return camel_dict_to_snake_dict(ss, ignore_list=('Tags',)) + ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)["StackSet"] + ss["Tags"] = boto3_tag_list_to_ansible_dict(ss["Tags"]) + return camel_dict_to_snake_dict(ss, ignore_list=("Tags",)) def describe_stack_tree(module, stack_set_name, operation_ids=None): - jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5, catch_extra_error_codes=['StackSetNotFound']) - cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator) + jittered_backoff_decorator = AWSRetry.jittered_backoff( + retries=5, delay=3, max_delay=5, catch_extra_error_codes=["StackSetNotFound"] + ) + cfn = module.client("cloudformation", retry_decorator=jittered_backoff_decorator) result = dict() - result['stack_set'] = camel_dict_to_snake_dict( + result["stack_set"] = camel_dict_to_snake_dict( cfn.describe_stack_set( StackSetName=stack_set_name, aws_retry=True, - )['StackSet'] + )["StackSet"] ) - result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags']) - result['operations_log'] = sorted( + result["stack_set"]["tags"] = boto3_tag_list_to_ansible_dict(result["stack_set"]["tags"]) + result["operations_log"] = sorted( camel_dict_to_snake_dict( cfn.list_stack_set_operations( StackSetName=stack_set_name, aws_retry=True, ) - )['summaries'], - key=lambda x: x['creation_timestamp'] + )["summaries"], + key=lambda x: x["creation_timestamp"], ) - result['stack_instances'] = sorted( - [ - camel_dict_to_snake_dict(i) for i in - cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries'] - ], - key=lambda i: i['region'] + i['account'] + result["stack_instances"] = sorted( + [camel_dict_to_snake_dict(i) for i in cfn.list_stack_instances(StackSetName=stack_set_name)["Summaries"]], + key=lambda i: i["region"] + i["account"], ) if operation_ids: - result['operations'] = [] + result["operations"] = [] for op_id in operation_ids: try: - result['operations'].append(camel_dict_to_snake_dict( - cfn.describe_stack_set_operation( - StackSetName=stack_set_name, - OperationId=op_id, - )['StackSetOperation'] - )) - except is_boto3_error_code('OperationNotFoundException'): # pylint: disable=duplicate-except + result["operations"].append( + camel_dict_to_snake_dict( + cfn.describe_stack_set_operation( + StackSetName=stack_set_name, + OperationId=op_id, + )["StackSetOperation"] + ) + ) + except is_boto3_error_code("OperationNotFoundException"): # pylint: disable=duplicate-except pass return result def get_operation_preferences(module): params = dict() - if module.params.get('regions'): - params['RegionOrder'] = list(module.params['regions']) + if module.params.get("regions"): + params["RegionOrder"] = list(module.params["regions"]) for param, api_name in { - 'fail_count': 'FailureToleranceCount', - 'fail_percentage': 'FailureTolerancePercentage', - 'parallel_percentage': 'MaxConcurrentPercentage', - 'parallel_count': 'MaxConcurrentCount', + "fail_count": "FailureToleranceCount", + "fail_percentage": "FailureTolerancePercentage", + "parallel_percentage": "MaxConcurrentPercentage", + "parallel_count": "MaxConcurrentCount", }.items(): - if module.params.get('failure_tolerance', {}).get(param): - params[api_name] = module.params.get('failure_tolerance', {}).get(param) + if module.params.get("failure_tolerance", {}).get(param): + params[api_name] = module.params.get("failure_tolerance", {}).get(param) return params @@ -497,171 +509,173 @@ def main(): argument_spec = dict( name=dict(required=True), description=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=900), - state=dict(default='present', choices=['present', 'absent']), - purge_stacks=dict(type='bool', default=True), - parameters=dict(type='dict', default={}), - template=dict(type='path'), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=900), + state=dict(default="present", choices=["present", "absent"]), + purge_stacks=dict(type="bool", default=True), + parameters=dict(type="dict", default={}), + template=dict(type="path"), template_url=dict(), template_body=dict(), - capabilities=dict(type='list', elements='str', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']), - regions=dict(type='list', elements='str'), - accounts=dict(type='list', elements='str'), + capabilities=dict(type="list", elements="str", choices=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"]), + regions=dict(type="list", elements="str"), + accounts=dict(type="list", elements="str"), failure_tolerance=dict( - type='dict', + type="dict", default={}, options=dict( - fail_count=dict(type='int'), - fail_percentage=dict(type='int'), - parallel_percentage=dict(type='int'), - parallel_count=dict(type='int'), + fail_count=dict(type="int"), + fail_percentage=dict(type="int"), + parallel_percentage=dict(type="int"), + parallel_count=dict(type="int"), ), mutually_exclusive=[ - ['fail_count', 'fail_percentage'], - ['parallel_count', 'parallel_percentage'], + ["fail_count", "fail_percentage"], + ["parallel_count", "parallel_percentage"], ], ), - administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']), - execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']), - tags=dict(type='dict'), + administration_role_arn=dict(aliases=["admin_role_arn", "administration_role", "admin_role"]), + execution_role_name=dict(aliases=["execution_role", "exec_role", "exec_role_name"]), + tags=dict(type="dict"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['template_url', 'template', 'template_body']], - supports_check_mode=True + mutually_exclusive=[["template_url", "template", "template_body"]], + supports_check_mode=True, ) # Wrap the cloudformation client methods that this module uses with # automatic backoff / retry for throttling error codes - jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound']) - cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator) - existing_stack_set = stack_set_facts(cfn, module.params['name']) + jittered_backoff_decorator = AWSRetry.jittered_backoff( + retries=10, delay=3, max_delay=30, catch_extra_error_codes=["StackSetNotFound"] + ) + cfn = module.client("cloudformation", retry_decorator=jittered_backoff_decorator) + existing_stack_set = stack_set_facts(cfn, module.params["name"]) operation_uuid = to_native(uuid.uuid4()) operation_ids = [] # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. stack_params = {} - state = module.params['state'] - if state == 'present' and not module.params['accounts']: + state = module.params["state"] + if state == "present" and not module.params["accounts"]: module.fail_json( msg="Can't create a stack set without choosing at least one account. " - "To get the ID of the current account, use the aws_caller_info module." + "To get the ID of the current account, use the aws_caller_info module." ) - module.params['accounts'] = [to_native(a) for a in module.params['accounts']] + module.params["accounts"] = [to_native(a) for a in module.params["accounts"]] - stack_params['StackSetName'] = module.params['name'] - if module.params.get('description'): - stack_params['Description'] = module.params['description'] + stack_params["StackSetName"] = module.params["name"] + if module.params.get("description"): + stack_params["Description"] = module.params["description"] - if module.params.get('capabilities'): - stack_params['Capabilities'] = module.params['capabilities'] + if module.params.get("capabilities"): + stack_params["Capabilities"] = module.params["capabilities"] - if module.params['template'] is not None: - with open(module.params['template'], 'r') as tpl: - stack_params['TemplateBody'] = tpl.read() - elif module.params['template_body'] is not None: - stack_params['TemplateBody'] = module.params['template_body'] - elif module.params['template_url'] is not None: - stack_params['TemplateURL'] = module.params['template_url'] + if module.params["template"] is not None: + with open(module.params["template"], "r") as tpl: + stack_params["TemplateBody"] = tpl.read() + elif module.params["template_body"] is not None: + stack_params["TemplateBody"] = module.params["template_body"] + elif module.params["template_url"] is not None: + stack_params["TemplateURL"] = module.params["template_url"] else: # no template is provided, but if the stack set exists already, we can use the existing one. if existing_stack_set: - stack_params['UsePreviousTemplate'] = True + stack_params["UsePreviousTemplate"] = True else: module.fail_json( msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, " - "`template_body`, or `template_url`".format(module.params['name']) + "`template_body`, or `template_url`".format(module.params["name"]) ) - stack_params['Parameters'] = [] - for k, v in module.params['parameters'].items(): + stack_params["Parameters"] = [] + for k, v in module.params["parameters"].items(): if isinstance(v, dict): # set parameter based on a dict to allow additional CFN Parameter Attributes param = dict(ParameterKey=k) - if 'value' in v: - param['ParameterValue'] = to_native(v['value']) + if "value" in v: + param["ParameterValue"] = to_native(v["value"]) - if 'use_previous_value' in v and bool(v['use_previous_value']): - param['UsePreviousValue'] = True - param.pop('ParameterValue', None) + if "use_previous_value" in v and bool(v["use_previous_value"]): + param["UsePreviousValue"] = True + param.pop("ParameterValue", None) - stack_params['Parameters'].append(param) + stack_params["Parameters"].append(param) else: # allow default k/v configuration to set a template parameter - stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)}) + stack_params["Parameters"].append({"ParameterKey": k, "ParameterValue": str(v)}) - if module.params.get('tags') and isinstance(module.params.get('tags'), dict): - stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags']) + if module.params.get("tags") and isinstance(module.params.get("tags"), dict): + stack_params["Tags"] = ansible_dict_to_boto3_tag_list(module.params["tags"]) - if module.params.get('administration_role_arn'): + if module.params.get("administration_role_arn"): # TODO loosen the semantics here to autodetect the account ID and build the ARN - stack_params['AdministrationRoleARN'] = module.params['administration_role_arn'] - if module.params.get('execution_role_name'): - stack_params['ExecutionRoleName'] = module.params['execution_role_name'] + stack_params["AdministrationRoleARN"] = module.params["administration_role_arn"] + if module.params.get("execution_role_name"): + stack_params["ExecutionRoleName"] = module.params["execution_role_name"] result = {} if module.check_mode: - if state == 'absent' and existing_stack_set: - module.exit_json(changed=True, msg='Stack set would be deleted', meta=[]) - elif state == 'absent' and not existing_stack_set: - module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[]) - elif state == 'present' and not existing_stack_set: - module.exit_json(changed=True, msg='New stack set would be created', meta=[]) - elif state == 'present' and existing_stack_set: + if state == "absent" and existing_stack_set: + module.exit_json(changed=True, msg="Stack set would be deleted", meta=[]) + elif state == "absent" and not existing_stack_set: + module.exit_json(changed=False, msg="Stack set doesn't exist", meta=[]) + elif state == "present" and not existing_stack_set: + module.exit_json(changed=True, msg="New stack set would be created", meta=[]) + elif state == "present" and existing_stack_set: new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances( cfn, - module.params['name'], - module.params['accounts'], - module.params['regions'], + module.params["name"], + module.params["accounts"], + module.params["regions"], ) if new_stacks: - module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[]) - elif unspecified_stacks and module.params.get('purge_stack_instances'): - module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[]) + module.exit_json(changed=True, msg="New stack instance(s) would be created", meta=[]) + elif unspecified_stacks and module.params.get("purge_stack_instances"): + module.exit_json(changed=True, msg="Old stack instance(s) would be deleted", meta=[]) else: # TODO: need to check the template and other settings for correct check mode - module.exit_json(changed=False, msg='No changes detected', meta=[]) + module.exit_json(changed=False, msg="No changes detected", meta=[]) changed = False - if state == 'present': + if state == "present": if not existing_stack_set: # on create this parameter has a different name, and cannot be referenced later in the job log - stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid) + stack_params["ClientRequestToken"] = "Ansible-StackSet-Create-{0}".format(operation_uuid) changed = True create_stack_set(module, stack_params, cfn) else: - stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid) - operation_ids.append(stack_params['OperationId']) - if module.params.get('regions'): - stack_params['OperationPreferences'] = get_operation_preferences(module) + stack_params["OperationId"] = "Ansible-StackSet-Update-{0}".format(operation_uuid) + operation_ids.append(stack_params["OperationId"]) + if module.params.get("regions"): + stack_params["OperationPreferences"] = get_operation_preferences(module) changed |= update_stack_set(module, stack_params, cfn) # now create/update any appropriate stack instances new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances( cfn, - module.params['name'], - module.params['accounts'], - module.params['regions'], + module.params["name"], + module.params["accounts"], + module.params["regions"], ) if new_stack_instances: - operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid)) + operation_ids.append("Ansible-StackInstance-Create-{0}".format(operation_uuid)) changed = True cfn.create_stack_instances( - StackSetName=module.params['name'], + StackSetName=module.params["name"], Accounts=list(set(acct for acct, region in new_stack_instances)), Regions=list(set(region for acct, region in new_stack_instances)), OperationPreferences=get_operation_preferences(module), OperationId=operation_ids[-1], ) else: - operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid)) + operation_ids.append("Ansible-StackInstance-Update-{0}".format(operation_uuid)) cfn.update_stack_instances( - StackSetName=module.params['name'], + StackSetName=module.params["name"], Accounts=list(set(acct for acct, region in existing_stack_instances)), Regions=list(set(region for acct, region in existing_stack_instances)), OperationPreferences=get_operation_preferences(module), @@ -669,55 +683,67 @@ def main(): ) for op in operation_ids: await_stack_set_operation( - module, cfn, operation_id=op, - stack_set_name=module.params['name'], - max_wait=module.params.get('wait_timeout'), + module, + cfn, + operation_id=op, + stack_set_name=module.params["name"], + max_wait=module.params.get("wait_timeout"), ) - elif state == 'absent': + elif state == "absent": if not existing_stack_set: - module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name'])) - if module.params.get('purge_stack_instances') is False: + module.exit_json(msg="Stack set {0} does not exist".format(module.params["name"])) + if module.params.get("purge_stack_instances") is False: pass try: cfn.delete_stack_set( - StackSetName=module.params['name'], + StackSetName=module.params["name"], + ) + module.exit_json(msg="Stack set {0} deleted".format(module.params["name"])) + except is_boto3_error_code("OperationInProgressException") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, msg="Cannot delete stack {0} while there is an operation in progress".format(module.params["name"]) ) - module.exit_json(msg='Stack set {0} deleted'.format(module.params['name'])) - except is_boto3_error_code('OperationInProgressException') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name'])) - except is_boto3_error_code('StackSetNotEmptyException'): # pylint: disable=duplicate-except - delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid) + except is_boto3_error_code("StackSetNotEmptyException"): # pylint: disable=duplicate-except + delete_instances_op = "Ansible-StackInstance-Delete-{0}".format(operation_uuid) cfn.delete_stack_instances( - StackSetName=module.params['name'], - Accounts=module.params['accounts'], - Regions=module.params['regions'], - RetainStacks=(not module.params.get('purge_stacks')), - OperationId=delete_instances_op + StackSetName=module.params["name"], + Accounts=module.params["accounts"], + Regions=module.params["regions"], + RetainStacks=(not module.params.get("purge_stacks")), + OperationId=delete_instances_op, ) await_stack_set_operation( - module, cfn, operation_id=delete_instances_op, - stack_set_name=stack_params['StackSetName'], - max_wait=module.params.get('wait_timeout'), + module, + cfn, + operation_id=delete_instances_op, + stack_set_name=stack_params["StackSetName"], + max_wait=module.params.get("wait_timeout"), ) try: cfn.delete_stack_set( - StackSetName=module.params['name'], + StackSetName=module.params["name"], ) - except is_boto3_error_code('StackSetNotEmptyException') as exc: # pylint: disable=duplicate-except + except is_boto3_error_code("StackSetNotEmptyException") as exc: # pylint: disable=duplicate-except # this time, it is likely that either the delete failed or there are more stacks. instances = cfn.list_stack_instances( - StackSetName=module.params['name'], + StackSetName=module.params["name"], + ) + stack_states = ", ".join( + "(account={Account}, region={Region}, state={Status})".format(**i) for i in instances["Summaries"] + ) + module.fail_json_aws( + exc, + msg="Could not purge all stacks, or not all accounts/regions were chosen for deletion: " + + stack_states, ) - stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries']) - module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states) - module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name'])) + module.exit_json(changed=True, msg="Stack set {0} deleted".format(module.params["name"])) - result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids)) - if any(o['status'] == 'FAILED' for o in result['operations']): + result.update(**describe_stack_tree(module, stack_params["StackSetName"], operation_ids=operation_ids)) + if any(o["status"] == "FAILED" for o in result["operations"]): module.fail_json(msg="One or more operations failed to execute", **result) module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py index 82a00b283be..ac43cada3ad 100644 --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -1461,41 +1461,42 @@ def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True): if list_items is None: list_items = [] if not isinstance(list_items, list): - raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items))) + raise ValueError("Expected a list, got a {0} with value {1}".format(type(list_items).__name__, str(list_items))) result = {} if include_quantity: - result['quantity'] = len(list_items) + result["quantity"] = len(list_items) if len(list_items) > 0: - result['items'] = list_items + result["items"] = list_items return result def create_distribution(client, module, config, tags): try: if not tags: - return client.create_distribution(aws_retry=True, DistributionConfig=config)['Distribution'] + return client.create_distribution(aws_retry=True, DistributionConfig=config)["Distribution"] else: - distribution_config_with_tags = { - 'DistributionConfig': config, - 'Tags': { - 'Items': tags - } - } - return client.create_distribution_with_tags(aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags)['Distribution'] + distribution_config_with_tags = {"DistributionConfig": config, "Tags": {"Items": tags}} + return client.create_distribution_with_tags( + aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags + )["Distribution"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error creating distribution") def delete_distribution(client, module, distribution): try: - return client.delete_distribution(aws_retry=True, Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag']) + return client.delete_distribution( + aws_retry=True, Id=distribution["Distribution"]["Id"], IfMatch=distribution["ETag"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution'])) + module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution["Distribution"])) def update_distribution(client, module, config, distribution_id, e_tag): try: - return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution'] + return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)[ + "Distribution" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config)) @@ -1517,7 +1518,7 @@ def untag_resource(client, module, arn, tag_keys): def list_tags_for_resource(client, module, arn): try: response = client.list_tags_for_resource(aws_retry=True, Resource=arn) - return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items')) + return boto3_tag_list_to_ansible_dict(response.get("Tags").get("Items")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error listing tags for resource") @@ -1549,83 +1550,130 @@ def __init__(self, module): self.__default_https_port = 443 self.__default_ipv6_enabled = False self.__default_origin_ssl_protocols = [ - 'TLSv1', - 'TLSv1.1', - 'TLSv1.2' + "TLSv1", + "TLSv1.1", + "TLSv1.2", ] - self.__default_custom_origin_protocol_policy = 'match-viewer' + self.__default_custom_origin_protocol_policy = "match-viewer" self.__default_custom_origin_read_timeout = 30 self.__default_custom_origin_keepalive_timeout = 5 - self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + self.__default_datetime_string = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f") self.__default_cache_behavior_min_ttl = 0 self.__default_cache_behavior_max_ttl = 31536000 self.__default_cache_behavior_default_ttl = 86400 self.__default_cache_behavior_compress = False - self.__default_cache_behavior_viewer_protocol_policy = 'allow-all' + self.__default_cache_behavior_viewer_protocol_policy = "allow-all" self.__default_cache_behavior_smooth_streaming = False - self.__default_cache_behavior_forwarded_values_forward_cookies = 'none' + self.__default_cache_behavior_forwarded_values_forward_cookies = "none" self.__default_cache_behavior_forwarded_values_query_string = True self.__default_trusted_signers_enabled = False - self.__valid_price_classes = set([ - 'PriceClass_100', - 'PriceClass_200', - 'PriceClass_All' - ]) - self.__valid_origin_protocol_policies = set([ - 'http-only', - 'match-viewer', - 'https-only' - ]) - self.__valid_origin_ssl_protocols = set([ - 'SSLv3', - 'TLSv1', - 'TLSv1.1', - 'TLSv1.2' - ]) - self.__valid_cookie_forwarding = set([ - 'none', - 'whitelist', - 'all' - ]) - self.__valid_viewer_protocol_policies = set([ - 'allow-all', - 'https-only', - 'redirect-to-https' - ]) - self.__valid_methods = set([ - 'GET', - 'HEAD', - 'POST', - 'PUT', - 'PATCH', - 'OPTIONS', - 'DELETE' - ]) + self.__valid_price_classes = set( + [ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All", + ] + ) + self.__valid_origin_protocol_policies = set( + [ + "http-only", + "match-viewer", + "https-only", + ] + ) + self.__valid_origin_ssl_protocols = set( + [ + "SSLv3", + "TLSv1", + "TLSv1.1", + "TLSv1.2", + ] + ) + self.__valid_cookie_forwarding = set( + [ + "none", + "whitelist", + "all", + ] + ) + self.__valid_viewer_protocol_policies = set( + [ + "allow-all", + "https-only", + "redirect-to-https", + ] + ) + self.__valid_methods = set( + [ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE", + ] + ) self.__valid_methods_cached_methods = [ - set([ - 'GET', - 'HEAD' - ]), - set([ - 'GET', - 'HEAD', - 'OPTIONS' - ]) + set( + [ + "GET", + "HEAD", + ] + ), + set( + [ + "GET", + "HEAD", + "OPTIONS", + ] + ), ] self.__valid_methods_allowed_methods = [ self.__valid_methods_cached_methods[0], self.__valid_methods_cached_methods[1], - self.__valid_methods + self.__valid_methods, ] self.__valid_lambda_function_association_event_types = set( - ["viewer-request", "viewer-response", "origin-request", "origin-response"] + [ + "viewer-request", + "viewer-response", + "origin-request", + "origin-response", + ] + ) + self.__valid_viewer_certificate_ssl_support_methods = set( + [ + "sni-only", + "vip", + ] ) - self.__valid_viewer_certificate_ssl_support_methods = set(["sni-only", "vip"]) self.__valid_viewer_certificate_minimum_protocol_versions = set( - ["SSLv3", "TLSv1", "TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018", "TLSv1.2_2019", "TLSv1.2_2021"] + [ + "SSLv3", + "TLSv1", + "TLSv1_2016", + "TLSv1.1_2016", + "TLSv1.2_2018", + "TLSv1.2_2019", + "TLSv1.2_2021", + ] + ) + self.__valid_viewer_certificate_certificate_sources = set( + [ + "cloudfront", + "iam", + "acm", + ] + ) + self.__valid_http_versions = set( + [ + "http1.1", + "http2", + "http3", + "http2and3", + ] ) - self.__valid_viewer_certificate_certificate_sources = set(["cloudfront", "iam", "acm"]) - self.__valid_http_versions = set(["http1.1", "http2", "http3", "http2and3"]) self.__s3_bucket_domain_identifier = ".s3.amazonaws.com" def add_missing_key(self, dict_object, key_to_set, value_to_set): @@ -1640,7 +1688,9 @@ def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_s dict_object = change_dict_key_name(dict_object, old_key, new_key) return dict_object - def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False): + def add_key_else_validate( + self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False + ): if key_name in dict_object: self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values) else: @@ -1655,26 +1705,38 @@ def validate_logging(self, logging): if logging is None: return None valid_logging = {} - if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging): - self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.") - valid_logging['include_cookies'] = logging.get('include_cookies') - valid_logging['enabled'] = logging.get('enabled') - valid_logging['bucket'] = logging.get('bucket') - valid_logging['prefix'] = logging.get('prefix') + if logging and not set(["enabled", "include_cookies", "bucket", "prefix"]).issubset(logging): + self.module.fail_json( + msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified." + ) + valid_logging["include_cookies"] = logging.get("include_cookies") + valid_logging["enabled"] = logging.get("enabled") + valid_logging["bucket"] = logging.get("bucket") + valid_logging["prefix"] = logging.get("prefix") return valid_logging except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution logging") def validate_is_list(self, list_to_validate, list_name): if not isinstance(list_to_validate, list): - self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__)) + self.module.fail_json( + msg="%s is of type %s. Must be a list." % (list_name, type(list_to_validate).__name__) + ) def validate_required_key(self, key_name, full_key_name, dict_object): if key_name not in dict_object: self.module.fail_json(msg="%s must be specified." % full_key_name) - def validate_origins(self, client, config, origins, default_origin_domain_name, - default_origin_path, create_distribution, purge_origins=False): + def validate_origins( + self, + client, + config, + origins, + default_origin_domain_name, + default_origin_path, + create_distribution, + purge_origins=False, + ): try: if origins is None: if default_origin_domain_name is None and not create_distribution: @@ -1683,23 +1745,24 @@ def validate_origins(self, client, config, origins, default_origin_domain_name, else: return ansible_list_to_cloudfront_list(config) if default_origin_domain_name is not None: - origins = [{ - 'domain_name': default_origin_domain_name, - 'origin_path': default_origin_path or '' - }] + origins = [{"domain_name": default_origin_domain_name, "origin_path": default_origin_path or ""}] else: origins = [] - self.validate_is_list(origins, 'origins') + self.validate_is_list(origins, "origins") if not origins and default_origin_domain_name is None and create_distribution: - self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one.") + self.module.fail_json( + msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one." + ) all_origins = OrderedDict() new_domains = list() for origin in config: - all_origins[origin.get('domain_name')] = origin + all_origins[origin.get("domain_name")] = origin for origin in origins: - origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path) - all_origins[origin['domain_name']] = origin - new_domains.append(origin['domain_name']) + origin = self.validate_origin( + client, all_origins.get(origin.get("domain_name"), {}), origin, default_origin_path + ) + all_origins[origin["domain_name"]] = origin + new_domains.append(origin["domain_name"]) if purge_origins: for domain in list(all_origins.keys()): if domain not in new_domains: @@ -1709,46 +1772,55 @@ def validate_origins(self, client, config, origins, default_origin_domain_name, self.module.fail_json_aws(e, msg="Error validating distribution origins") def validate_s3_origin_configuration(self, client, existing_config, origin): - if origin.get('s3_origin_config', {}).get('origin_access_identity'): - return origin['s3_origin_config']['origin_access_identity'] + if origin.get("s3_origin_config", {}).get("origin_access_identity"): + return origin["s3_origin_config"]["origin_access_identity"] - if existing_config.get('s3_origin_config', {}).get('origin_access_identity'): - return existing_config['s3_origin_config']['origin_access_identity'] + if existing_config.get("s3_origin_config", {}).get("origin_access_identity"): + return existing_config["s3_origin_config"]["origin_access_identity"] try: - comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string) - caller_reference = "%s-%s" % (origin.get('domain_name'), self.__default_datetime_string) - cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, - Comment=comment)) - oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id'] + comment = "access-identity-by-ansible-%s-%s" % (origin.get("domain_name"), self.__default_datetime_string) + caller_reference = "%s-%s" % (origin.get("domain_name"), self.__default_datetime_string) + cfoai_config = dict( + CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, Comment=comment) + ) + oai = client.create_cloud_front_origin_access_identity(**cfoai_config)["CloudFrontOriginAccessIdentity"][ + "Id" + ] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id']) + self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin["id"]) return "origin-access-identity/cloudfront/%s" % oai def validate_origin(self, client, existing_config, origin, default_origin_path): try: - origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or '')) - self.validate_required_key('origin_path', 'origins[].origin_path', origin) - origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string)) - if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0: - for custom_header in origin.get('custom_headers'): - if 'header_name' not in custom_header or 'header_value' not in custom_header: - self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.") - origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers')) + origin = self.add_missing_key( + origin, "origin_path", existing_config.get("origin_path", default_origin_path or "") + ) + self.validate_required_key("origin_path", "origins[].origin_path", origin) + origin = self.add_missing_key(origin, "id", existing_config.get("id", self.__default_datetime_string)) + if "custom_headers" in origin and len(origin.get("custom_headers")) > 0: + for custom_header in origin.get("custom_headers"): + if "header_name" not in custom_header or "header_value" not in custom_header: + self.module.fail_json( + msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified." + ) + origin["custom_headers"] = ansible_list_to_cloudfront_list(origin.get("custom_headers")) else: - origin['custom_headers'] = ansible_list_to_cloudfront_list() - if 'origin_shield' in origin: - origin_shield = origin.get('origin_shield') - if origin_shield.get('enabled'): - origin_shield_region = origin_shield.get('origin_shield_region') + origin["custom_headers"] = ansible_list_to_cloudfront_list() + if "origin_shield" in origin: + origin_shield = origin.get("origin_shield") + if origin_shield.get("enabled"): + origin_shield_region = origin_shield.get("origin_shield_region") if origin_shield_region is None: - self.module.fail_json(msg="origins[].origin_shield.origin_shield_region must be specified" - " when origins[].origin_shield.enabled is true.") + self.module.fail_json( + msg="origins[].origin_shield.origin_shield_region must be specified" + " when origins[].origin_shield.enabled is true." + ) else: origin_shield_region = origin_shield_region.lower() - if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower(): + if self.__s3_bucket_domain_identifier in origin.get("domain_name").lower(): if origin.get("s3_origin_access_identity_enabled") is not None: - if origin['s3_origin_access_identity_enabled']: + if origin["s3_origin_access_identity_enabled"]: s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) else: s3_origin_config = None @@ -1762,26 +1834,47 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): origin["s3_origin_config"] = dict(origin_access_identity=oai) - if 'custom_origin_config' in origin: - self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive") + if "custom_origin_config" in origin: + self.module.fail_json( + msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive" + ) else: - origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {})) - custom_origin_config = origin.get('custom_origin_config') - custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy', - 'origins[].custom_origin_config.origin_protocol_policy', - self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies) - custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout) - custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout) - custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port) - custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port) - if custom_origin_config.get('origin_ssl_protocols', {}).get('items'): - custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items'] - if custom_origin_config.get('origin_ssl_protocols'): - self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols', - self.__valid_origin_ssl_protocols) + origin = self.add_missing_key( + origin, "custom_origin_config", existing_config.get("custom_origin_config", {}) + ) + custom_origin_config = origin.get("custom_origin_config") + custom_origin_config = self.add_key_else_validate( + custom_origin_config, + "origin_protocol_policy", + "origins[].custom_origin_config.origin_protocol_policy", + self.__default_custom_origin_protocol_policy, + self.__valid_origin_protocol_policies, + ) + custom_origin_config = self.add_missing_key( + custom_origin_config, "origin_read_timeout", self.__default_custom_origin_read_timeout + ) + custom_origin_config = self.add_missing_key( + custom_origin_config, "origin_keepalive_timeout", self.__default_custom_origin_keepalive_timeout + ) + custom_origin_config = self.add_key_else_change_dict_key( + custom_origin_config, "http_port", "h_t_t_p_port", self.__default_http_port + ) + custom_origin_config = self.add_key_else_change_dict_key( + custom_origin_config, "https_port", "h_t_t_p_s_port", self.__default_https_port + ) + if custom_origin_config.get("origin_ssl_protocols", {}).get("items"): + custom_origin_config["origin_ssl_protocols"] = custom_origin_config["origin_ssl_protocols"]["items"] + if custom_origin_config.get("origin_ssl_protocols"): + self.validate_attribute_list_with_allowed_list( + custom_origin_config["origin_ssl_protocols"], + "origins[].origin_ssl_protocols", + self.__valid_origin_ssl_protocols, + ) else: - custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols - custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols']) + custom_origin_config["origin_ssl_protocols"] = self.__default_origin_ssl_protocols + custom_origin_config["origin_ssl_protocols"] = ansible_list_to_cloudfront_list( + custom_origin_config["origin_ssl_protocols"] + ) return origin except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error validating distribution origin") @@ -1795,13 +1888,16 @@ def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge # is true (if purge_cache_behaviors is not true, we can't really know the full new order) if not purge_cache_behaviors: for behavior in config: - all_cache_behaviors[behavior['path_pattern']] = behavior + all_cache_behaviors[behavior["path_pattern"]] = behavior for cache_behavior in cache_behaviors: - valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}), - cache_behavior, valid_origins) - all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior + valid_cache_behavior = self.validate_cache_behavior( + all_cache_behaviors.get(cache_behavior.get("path_pattern"), {}), cache_behavior, valid_origins + ) + all_cache_behaviors[cache_behavior["path_pattern"]] = valid_cache_behavior if purge_cache_behaviors: - for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]): + for target_origin_id in set(all_cache_behaviors.keys()) - set( + [cb["path_pattern"] for cb in cache_behaviors] + ): del all_cache_behaviors[target_origin_id] return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values())) except Exception as e: @@ -1812,40 +1908,61 @@ def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_defa cache_behavior = {} if cache_behavior is None and valid_origins is not None: return config - cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache) - cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior) - cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior) - cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior) - cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior) - cache_behavior = self.validate_field_level_encryption_id(config, cache_behavior.get('field_level_encryption_id'), cache_behavior) + cache_behavior = self.validate_cache_behavior_first_level_keys( + config, cache_behavior, valid_origins, is_default_cache + ) + cache_behavior = self.validate_forwarded_values(config, cache_behavior.get("forwarded_values"), cache_behavior) + cache_behavior = self.validate_allowed_methods(config, cache_behavior.get("allowed_methods"), cache_behavior) + cache_behavior = self.validate_lambda_function_associations( + config, cache_behavior.get("lambda_function_associations"), cache_behavior + ) + cache_behavior = self.validate_trusted_signers(config, cache_behavior.get("trusted_signers"), cache_behavior) + cache_behavior = self.validate_field_level_encryption_id( + config, cache_behavior.get("field_level_encryption_id"), cache_behavior + ) return cache_behavior def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache): try: - cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l', - config.get('min_t_t_l', self.__default_cache_behavior_min_ttl)) - cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l', - config.get('max_t_t_l', self.__default_cache_behavior_max_ttl)) - cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l', - config.get('default_t_t_l', self.__default_cache_behavior_default_ttl)) - cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress)) - target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id')) + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, "min_ttl", "min_t_t_l", config.get("min_t_t_l", self.__default_cache_behavior_min_ttl) + ) + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, "max_ttl", "max_t_t_l", config.get("max_t_t_l", self.__default_cache_behavior_max_ttl) + ) + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, + "default_ttl", + "default_t_t_l", + config.get("default_t_t_l", self.__default_cache_behavior_default_ttl), + ) + cache_behavior = self.add_missing_key( + cache_behavior, "compress", config.get("compress", self.__default_cache_behavior_compress) + ) + target_origin_id = cache_behavior.get("target_origin_id", config.get("target_origin_id")) if not target_origin_id: target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins) - if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]: + if target_origin_id not in [origin["id"] for origin in valid_origins.get("items", [])]: if is_default_cache: - cache_behavior_name = 'Default cache behavior' + cache_behavior_name = "Default cache behavior" else: - cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern'] - self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." % - cache_behavior_name) - cache_behavior['target_origin_id'] = target_origin_id - cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy', - config.get('viewer_protocol_policy', - self.__default_cache_behavior_viewer_protocol_policy), - self.__valid_viewer_protocol_policies) - cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming', - config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming)) + cache_behavior_name = "Cache behavior for path %s" % cache_behavior["path_pattern"] + self.module.fail_json( + msg="%s has target_origin_id pointing to an origin that does not exist." % cache_behavior_name + ) + cache_behavior["target_origin_id"] = target_origin_id + cache_behavior = self.add_key_else_validate( + cache_behavior, + "viewer_protocol_policy", + "cache_behavior.viewer_protocol_policy", + config.get("viewer_protocol_policy", self.__default_cache_behavior_viewer_protocol_policy), + self.__valid_viewer_protocol_policies, + ) + cache_behavior = self.add_missing_key( + cache_behavior, + "smooth_streaming", + config.get("smooth_streaming", self.__default_cache_behavior_smooth_streaming), + ) return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys") @@ -1854,30 +1971,40 @@ def validate_forwarded_values(self, config, forwarded_values, cache_behavior): try: if not forwarded_values: forwarded_values = dict() - existing_config = config.get('forwarded_values', {}) - headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items')) + existing_config = config.get("forwarded_values", {}) + headers = forwarded_values.get("headers", existing_config.get("headers", {}).get("items")) if headers: headers.sort() - forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers) - if 'cookies' not in forwarded_values: - forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies) - forwarded_values['cookies'] = {'forward': forward} + forwarded_values["headers"] = ansible_list_to_cloudfront_list(headers) + if "cookies" not in forwarded_values: + forward = existing_config.get("cookies", {}).get( + "forward", self.__default_cache_behavior_forwarded_values_forward_cookies + ) + forwarded_values["cookies"] = {"forward": forward} else: - existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items') - whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist) + existing_whitelist = existing_config.get("cookies", {}).get("whitelisted_names", {}).get("items") + whitelist = forwarded_values.get("cookies").get("whitelisted_names", existing_whitelist) if whitelist: - self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names') - forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist) - cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward')) - self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward', - self.__valid_cookie_forwarding) - forwarded_values['cookies']['forward'] = cookie_forwarding - query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', [])) - self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys') - forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys) - forwarded_values = self.add_missing_key(forwarded_values, 'query_string', - existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string)) - cache_behavior['forwarded_values'] = forwarded_values + self.validate_is_list(whitelist, "forwarded_values.whitelisted_names") + forwarded_values["cookies"]["whitelisted_names"] = ansible_list_to_cloudfront_list(whitelist) + cookie_forwarding = forwarded_values.get("cookies").get( + "forward", existing_config.get("cookies", {}).get("forward") + ) + self.validate_attribute_with_allowed_values( + cookie_forwarding, "cache_behavior.forwarded_values.cookies.forward", self.__valid_cookie_forwarding + ) + forwarded_values["cookies"]["forward"] = cookie_forwarding + query_string_cache_keys = forwarded_values.get( + "query_string_cache_keys", existing_config.get("query_string_cache_keys", {}).get("items", []) + ) + self.validate_is_list(query_string_cache_keys, "forwarded_values.query_string_cache_keys") + forwarded_values["query_string_cache_keys"] = ansible_list_to_cloudfront_list(query_string_cache_keys) + forwarded_values = self.add_missing_key( + forwarded_values, + "query_string", + existing_config.get("query_string", self.__default_cache_behavior_forwarded_values_query_string), + ) + cache_behavior["forwarded_values"] = forwarded_values return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating forwarded values") @@ -1885,57 +2012,68 @@ def validate_forwarded_values(self, config, forwarded_values, cache_behavior): def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior): try: if lambda_function_associations is not None: - self.validate_is_list(lambda_function_associations, 'lambda_function_associations') + self.validate_is_list(lambda_function_associations, "lambda_function_associations") for association in lambda_function_associations: - association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n') - self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type', - self.__valid_lambda_function_association_event_types) - cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations) + association = change_dict_key_name(association, "lambda_function_arn", "lambda_function_a_r_n") + self.validate_attribute_with_allowed_values( + association.get("event_type"), + "cache_behaviors[].lambda_function_associations.event_type", + self.__valid_lambda_function_association_event_types, + ) + cache_behavior["lambda_function_associations"] = ansible_list_to_cloudfront_list( + lambda_function_associations + ) else: - if 'lambda_function_associations' in config: - cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations') + if "lambda_function_associations" in config: + cache_behavior["lambda_function_associations"] = config.get("lambda_function_associations") else: - cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([]) + cache_behavior["lambda_function_associations"] = ansible_list_to_cloudfront_list([]) return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating lambda function associations") def validate_field_level_encryption_id(self, config, field_level_encryption_id, cache_behavior): if field_level_encryption_id is not None: - cache_behavior['field_level_encryption_id'] = field_level_encryption_id - elif 'field_level_encryption_id' in config: - cache_behavior['field_level_encryption_id'] = config.get('field_level_encryption_id') + cache_behavior["field_level_encryption_id"] = field_level_encryption_id + elif "field_level_encryption_id" in config: + cache_behavior["field_level_encryption_id"] = config.get("field_level_encryption_id") else: - cache_behavior['field_level_encryption_id'] = "" + cache_behavior["field_level_encryption_id"] = "" return cache_behavior def validate_allowed_methods(self, config, allowed_methods, cache_behavior): try: if allowed_methods is not None: - self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods) - temp_allowed_items = allowed_methods.get('items') - self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items') - self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]', - self.__valid_methods_allowed_methods) - cached_items = allowed_methods.get('cached_methods') - if 'cached_methods' in allowed_methods: - self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods') - self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_items.cached_methods[]', - self.__valid_methods_cached_methods) + self.validate_required_key("items", "cache_behavior.allowed_methods.items[]", allowed_methods) + temp_allowed_items = allowed_methods.get("items") + self.validate_is_list(temp_allowed_items, "cache_behavior.allowed_methods.items") + self.validate_attribute_list_with_allowed_list( + temp_allowed_items, "cache_behavior.allowed_methods.items[]", self.__valid_methods_allowed_methods + ) + cached_items = allowed_methods.get("cached_methods") + if "cached_methods" in allowed_methods: + self.validate_is_list(cached_items, "cache_behavior.allowed_methods.cached_methods") + self.validate_attribute_list_with_allowed_list( + cached_items, + "cache_behavior.allowed_items.cached_methods[]", + self.__valid_methods_cached_methods, + ) # we don't care if the order of how cloudfront stores the methods differs - preserving existing # order reduces likelihood of making unnecessary changes - if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items): - cache_behavior['allowed_methods'] = config['allowed_methods'] + if "allowed_methods" in config and set(config["allowed_methods"]["items"]) == set(temp_allowed_items): + cache_behavior["allowed_methods"] = config["allowed_methods"] else: - cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items) + cache_behavior["allowed_methods"] = ansible_list_to_cloudfront_list(temp_allowed_items) - if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])): - cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods'] + if cached_items and set(cached_items) == set( + config.get("allowed_methods", {}).get("cached_methods", {}).get("items", []) + ): + cache_behavior["allowed_methods"]["cached_methods"] = config["allowed_methods"]["cached_methods"] else: - cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items) + cache_behavior["allowed_methods"]["cached_methods"] = ansible_list_to_cloudfront_list(cached_items) else: - if 'allowed_methods' in config: - cache_behavior['allowed_methods'] = config.get('allowed_methods') + if "allowed_methods" in config: + cache_behavior["allowed_methods"] = config.get("allowed_methods") return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating allowed methods") @@ -1944,14 +2082,16 @@ def validate_trusted_signers(self, config, trusted_signers, cache_behavior): try: if trusted_signers is None: trusted_signers = {} - if 'items' in trusted_signers: - valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items')) + if "items" in trusted_signers: + valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get("items")) else: - valid_trusted_signers = dict(quantity=config.get('quantity', 0)) - if 'items' in config: - valid_trusted_signers = dict(items=config['items']) - valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled)) - cache_behavior['trusted_signers'] = valid_trusted_signers + valid_trusted_signers = dict(quantity=config.get("quantity", 0)) + if "items" in config: + valid_trusted_signers = dict(items=config["items"]) + valid_trusted_signers["enabled"] = trusted_signers.get( + "enabled", config.get("enabled", self.__default_trusted_signers_enabled) + ) + cache_behavior["trusted_signers"] = valid_trusted_signers return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating trusted signers") @@ -1960,19 +2100,37 @@ def validate_viewer_certificate(self, viewer_certificate): try: if viewer_certificate is None: return None - if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None: - self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" + - "_certificate set to true.") - self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method', - self.__valid_viewer_certificate_ssl_support_methods) - self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version', - self.__valid_viewer_certificate_minimum_protocol_versions) - self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source', - self.__valid_viewer_certificate_certificate_sources) - viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate') - viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method') - viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id') - viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn') + if ( + viewer_certificate.get("cloudfront_default_certificate") + and viewer_certificate.get("ssl_support_method") is not None + ): + self.module.fail_json( + msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" + + "_certificate set to true." + ) + self.validate_attribute_with_allowed_values( + viewer_certificate.get("ssl_support_method"), + "viewer_certificate.ssl_support_method", + self.__valid_viewer_certificate_ssl_support_methods, + ) + self.validate_attribute_with_allowed_values( + viewer_certificate.get("minimum_protocol_version"), + "viewer_certificate.minimum_protocol_version", + self.__valid_viewer_certificate_minimum_protocol_versions, + ) + self.validate_attribute_with_allowed_values( + viewer_certificate.get("certificate_source"), + "viewer_certificate.certificate_source", + self.__valid_viewer_certificate_certificate_sources, + ) + viewer_certificate = change_dict_key_name( + viewer_certificate, "cloudfront_default_certificate", "cloud_front_default_certificate" + ) + viewer_certificate = change_dict_key_name(viewer_certificate, "ssl_support_method", "s_s_l_support_method") + viewer_certificate = change_dict_key_name(viewer_certificate, "iam_certificate_id", "i_a_m_certificate_id") + viewer_certificate = change_dict_key_name( + viewer_certificate, "acm_certificate_arn", "a_c_m_certificate_arn" + ) return viewer_certificate except Exception as e: self.module.fail_json_aws(e, msg="Error validating viewer certificate") @@ -1981,16 +2139,18 @@ def validate_custom_error_responses(self, config, custom_error_responses, purge_ try: if custom_error_responses is None and not purge_custom_error_responses: return ansible_list_to_cloudfront_list(config) - self.validate_is_list(custom_error_responses, 'custom_error_responses') + self.validate_is_list(custom_error_responses, "custom_error_responses") result = list() - existing_responses = dict((response['error_code'], response) for response in custom_error_responses) + existing_responses = dict((response["error_code"], response) for response in custom_error_responses) for custom_error_response in custom_error_responses: - self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response) - custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l') - if 'response_code' in custom_error_response: - custom_error_response['response_code'] = str(custom_error_response['response_code']) - if custom_error_response['error_code'] in existing_responses: - del existing_responses[custom_error_response['error_code']] + self.validate_required_key("error_code", "custom_error_responses[].error_code", custom_error_response) + custom_error_response = change_dict_key_name( + custom_error_response, "error_caching_min_ttl", "error_caching_min_t_t_l" + ) + if "response_code" in custom_error_response: + custom_error_response["response_code"] = str(custom_error_response["response_code"]) + if custom_error_response["error_code"] in existing_responses: + del existing_responses[custom_error_response["error_code"]] result.append(custom_error_response) if not purge_custom_error_responses: result.extend(existing_responses.values()) @@ -2006,54 +2166,70 @@ def validate_restrictions(self, config, restrictions, purge_restrictions=False): return None else: return config - self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions) - geo_restriction = restrictions.get('geo_restriction') - self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction) - existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', []) - geo_restriction_items = geo_restriction.get('items') + self.validate_required_key("geo_restriction", "restrictions.geo_restriction", restrictions) + geo_restriction = restrictions.get("geo_restriction") + self.validate_required_key( + "restriction_type", "restrictions.geo_restriction.restriction_type", geo_restriction + ) + existing_restrictions = ( + config.get("geo_restriction", {}).get(geo_restriction["restriction_type"], {}).get("items", []) + ) + geo_restriction_items = geo_restriction.get("items") if not purge_restrictions: - geo_restriction_items.extend([rest for rest in existing_restrictions if - rest not in geo_restriction_items]) + geo_restriction_items.extend( + [rest for rest in existing_restrictions if rest not in geo_restriction_items] + ) valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items) - valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type') - return {'geo_restriction': valid_restrictions} + valid_restrictions["restriction_type"] = geo_restriction.get("restriction_type") + return {"geo_restriction": valid_restrictions} except Exception as e: self.module.fail_json_aws(e, msg="Error validating restrictions") - def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id): + def validate_distribution_config_parameters( + self, config, default_root_object, ipv6_enabled, http_version, web_acl_id + ): try: - config['default_root_object'] = default_root_object or config.get('default_root_object', '') - config['is_i_p_v6_enabled'] = ipv6_enabled if ipv6_enabled is not None else config.get('is_i_p_v6_enabled', self.__default_ipv6_enabled) - if http_version is not None or config.get('http_version'): - self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions) - config['http_version'] = http_version or config.get('http_version') - if web_acl_id or config.get('web_a_c_l_id'): - config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id') + config["default_root_object"] = default_root_object or config.get("default_root_object", "") + config["is_i_p_v6_enabled"] = ( + ipv6_enabled + if ipv6_enabled is not None + else config.get("is_i_p_v6_enabled", self.__default_ipv6_enabled) + ) + if http_version is not None or config.get("http_version"): + self.validate_attribute_with_allowed_values(http_version, "http_version", self.__valid_http_versions) + config["http_version"] = http_version or config.get("http_version") + if web_acl_id or config.get("web_a_c_l_id"): + config["web_a_c_l_id"] = web_acl_id or config.get("web_a_c_l_id") return config except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution config parameters") - def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False): + def validate_common_distribution_parameters( + self, config, enabled, aliases, logging, price_class, purge_aliases=False + ): try: if config is None: config = {} if aliases is not None: if not purge_aliases: - aliases.extend([alias for alias in config.get('aliases', {}).get('items', []) - if alias not in aliases]) - config['aliases'] = ansible_list_to_cloudfront_list(aliases) + aliases.extend( + [alias for alias in config.get("aliases", {}).get("items", []) if alias not in aliases] + ) + config["aliases"] = ansible_list_to_cloudfront_list(aliases) if logging is not None: - config['logging'] = self.validate_logging(logging) - config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled) + config["logging"] = self.validate_logging(logging) + config["enabled"] = enabled or config.get("enabled", self.__default_distribution_enabled) if price_class is not None: - self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes) - config['price_class'] = price_class + self.validate_attribute_with_allowed_values(price_class, "price_class", self.__valid_price_classes) + config["price_class"] = price_class return config except Exception as e: self.module.fail_json_aws(e, msg="Error validating common distribution parameters") def validate_comment(self, config, comment): - config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string) + config["comment"] = comment or config.get( + "comment", "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string + ) return config def validate_caller_reference(self, caller_reference): @@ -2062,37 +2238,58 @@ def validate_caller_reference(self, caller_reference): def get_first_origin_id_for_default_cache_behavior(self, valid_origins): try: if valid_origins is not None: - valid_origins_list = valid_origins.get('items') - if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0: - return str(valid_origins_list[0].get('id')) - self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.") + valid_origins_list = valid_origins.get("items") + if ( + valid_origins_list is not None + and isinstance(valid_origins_list, list) + and len(valid_origins_list) > 0 + ): + return str(valid_origins_list[0].get("id")) + self.module.fail_json( + msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration." + ) except Exception as e: self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior") def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list): try: self.validate_is_list(attribute_list, attribute_list_name) - if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or - isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)): - self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list))) + if ( + isinstance(allowed_list, list) + and set(attribute_list) not in allowed_list + or isinstance(allowed_list, set) + and not set(allowed_list).issuperset(attribute_list) + ): + self.module.fail_json( + msg="The attribute list {0} must be one of [{1}]".format( + attribute_list_name, " ".join(str(a) for a in allowed_list) + ) + ) except Exception as e: self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list") def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list): if attribute is not None and attribute not in allowed_list: - self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list))) + self.module.fail_json( + msg="The attribute {0} must be one of [{1}]".format( + attribute_name, " ".join(str(a) for a in allowed_list) + ) + ) def validate_distribution_from_caller_reference(self, caller_reference): try: distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False) - distribution_name = 'Distribution' - distribution_config_name = 'DistributionConfig' - distribution_ids = [dist.get('Id') for dist in distributions] + distribution_name = "Distribution" + distribution_config_name = "DistributionConfig" + distribution_ids = [dist.get("Id") for dist in distributions] for distribution_id in distribution_ids: distribution = self.__cloudfront_facts_mgr.get_distribution(id=distribution_id) if distribution is not None: distribution_config = distribution[distribution_name].get(distribution_config_name) - if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference: + if ( + distribution_config is not None + and distribution_config.get("CallerReference") == caller_reference + ): distribution[distribution_name][distribution_config_name] = distribution_config return distribution @@ -2110,28 +2307,33 @@ def validate_distribution_from_aliases_caller_reference(self, distribution_id, a return self.__cloudfront_facts_mgr.get_distribution(id=distribution_id) return None except Exception as e: - self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference") + self.module.fail_json_aws( + e, msg="Error validating distribution_id from alias, aliases and caller reference" + ) def validate_distribution_id_from_alias(self, aliases): distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False) if distributions: for distribution in distributions: - distribution_aliases = distribution.get('Aliases', {}).get('Items', []) + distribution_aliases = distribution.get("Aliases", {}).get("Items", []) if set(aliases) & set(distribution_aliases): - return distribution['Id'] + return distribution["Id"] return None def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference): if distribution_id is None: - distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Id'] + distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)["Id"] try: - waiter = client.get_waiter('distribution_deployed') + waiter = client.get_waiter("distribution_deployed") attempts = 1 + int(wait_timeout / 60) - waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts}) + waiter.wait(Id=distribution_id, WaiterConfig={"MaxAttempts": attempts}) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, msg="Timeout waiting for CloudFront action." - " Waited for {0} seconds before timeout.".format(to_text(wait_timeout))) + self.module.fail_json_aws( + e, + msg="Timeout waiting for CloudFront action." + " Waited for {0} seconds before timeout.".format(to_text(wait_timeout)), + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id)) @@ -2139,36 +2341,36 @@ def wait_until_processed(self, client, wait_timeout, distribution_id, caller_ref def main(): argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), + state=dict(choices=["present", "absent"], default="present"), caller_reference=dict(), comment=dict(), distribution_id=dict(), e_tag=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), alias=dict(), - aliases=dict(type='list', default=[], elements='str'), - purge_aliases=dict(type='bool', default=False), + aliases=dict(type="list", default=[], elements="str"), + purge_aliases=dict(type="bool", default=False), default_root_object=dict(), - origins=dict(type='list', elements='dict'), - purge_origins=dict(type='bool', default=False), - default_cache_behavior=dict(type='dict'), - cache_behaviors=dict(type='list', elements='dict'), - purge_cache_behaviors=dict(type='bool', default=False), - custom_error_responses=dict(type='list', elements='dict'), - purge_custom_error_responses=dict(type='bool', default=False), - logging=dict(type='dict'), + origins=dict(type="list", elements="dict"), + purge_origins=dict(type="bool", default=False), + default_cache_behavior=dict(type="dict"), + cache_behaviors=dict(type="list", elements="dict"), + purge_cache_behaviors=dict(type="bool", default=False), + custom_error_responses=dict(type="list", elements="dict"), + purge_custom_error_responses=dict(type="bool", default=False), + logging=dict(type="dict"), price_class=dict(), - enabled=dict(type='bool'), - viewer_certificate=dict(type='dict'), - restrictions=dict(type='dict'), + enabled=dict(type="bool"), + viewer_certificate=dict(type="dict"), + restrictions=dict(type="dict"), web_acl_id=dict(), http_version=dict(), - ipv6_enabled=dict(type='bool'), + ipv6_enabled=dict(type="bool"), default_origin_domain_name=dict(), default_origin_path=dict(), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=1800, type='int') + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=1800, type="int"), ) result = {} @@ -2178,129 +2380,154 @@ def main(): argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[ - ['distribution_id', 'alias'], - ['default_origin_domain_name', 'distribution_id'], - ['default_origin_domain_name', 'alias'], - ] + ["distribution_id", "alias"], + ["default_origin_domain_name", "distribution_id"], + ["default_origin_domain_name", "alias"], + ], ) - client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("cloudfront", retry_decorator=AWSRetry.jittered_backoff()) validation_mgr = CloudFrontValidationManager(module) - state = module.params.get('state') - caller_reference = module.params.get('caller_reference') - comment = module.params.get('comment') - e_tag = module.params.get('e_tag') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - distribution_id = module.params.get('distribution_id') - alias = module.params.get('alias') - aliases = module.params.get('aliases') - purge_aliases = module.params.get('purge_aliases') - default_root_object = module.params.get('default_root_object') - origins = module.params.get('origins') - purge_origins = module.params.get('purge_origins') - default_cache_behavior = module.params.get('default_cache_behavior') - cache_behaviors = module.params.get('cache_behaviors') - purge_cache_behaviors = module.params.get('purge_cache_behaviors') - custom_error_responses = module.params.get('custom_error_responses') - purge_custom_error_responses = module.params.get('purge_custom_error_responses') - logging = module.params.get('logging') - price_class = module.params.get('price_class') - enabled = module.params.get('enabled') - viewer_certificate = module.params.get('viewer_certificate') - restrictions = module.params.get('restrictions') - purge_restrictions = module.params.get('purge_restrictions') - web_acl_id = module.params.get('web_acl_id') - http_version = module.params.get('http_version') - ipv6_enabled = module.params.get('ipv6_enabled') - default_origin_domain_name = module.params.get('default_origin_domain_name') - default_origin_path = module.params.get('default_origin_path') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + state = module.params.get("state") + caller_reference = module.params.get("caller_reference") + comment = module.params.get("comment") + e_tag = module.params.get("e_tag") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + distribution_id = module.params.get("distribution_id") + alias = module.params.get("alias") + aliases = module.params.get("aliases") + purge_aliases = module.params.get("purge_aliases") + default_root_object = module.params.get("default_root_object") + origins = module.params.get("origins") + purge_origins = module.params.get("purge_origins") + default_cache_behavior = module.params.get("default_cache_behavior") + cache_behaviors = module.params.get("cache_behaviors") + purge_cache_behaviors = module.params.get("purge_cache_behaviors") + custom_error_responses = module.params.get("custom_error_responses") + purge_custom_error_responses = module.params.get("purge_custom_error_responses") + logging = module.params.get("logging") + price_class = module.params.get("price_class") + enabled = module.params.get("enabled") + viewer_certificate = module.params.get("viewer_certificate") + restrictions = module.params.get("restrictions") + purge_restrictions = module.params.get("purge_restrictions") + web_acl_id = module.params.get("web_acl_id") + http_version = module.params.get("http_version") + ipv6_enabled = module.params.get("ipv6_enabled") + default_origin_domain_name = module.params.get("default_origin_domain_name") + default_origin_path = module.params.get("default_origin_path") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") if alias and alias not in aliases: aliases.append(alias) - distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference) + distribution = validation_mgr.validate_distribution_from_aliases_caller_reference( + distribution_id, aliases, caller_reference + ) - update = state == 'present' and distribution - create = state == 'present' and not distribution - delete = state == 'absent' and distribution + update = state == "present" and distribution + create = state == "present" and not distribution + delete = state == "absent" and distribution if not (update or create or delete): module.exit_json(changed=False) config = {} if update or delete: - config = distribution['Distribution']['DistributionConfig'] - e_tag = distribution['ETag'] - distribution_id = distribution['Distribution']['Id'] + config = distribution["Distribution"]["DistributionConfig"] + e_tag = distribution["ETag"] + distribution_id = distribution["Distribution"]["Id"] if update: config = camel_dict_to_snake_dict(config, reversible=True) if create or update: - config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases) - config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id) - config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name, - default_origin_path, create, purge_origins) - config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []), - cache_behaviors, config['origins'], purge_cache_behaviors) - config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}), - default_cache_behavior, config['origins'], True) - config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []), - custom_error_responses, purge_custom_error_responses) - valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions) + config = validation_mgr.validate_common_distribution_parameters( + config, enabled, aliases, logging, price_class, purge_aliases + ) + config = validation_mgr.validate_distribution_config_parameters( + config, default_root_object, ipv6_enabled, http_version, web_acl_id + ) + config["origins"] = validation_mgr.validate_origins( + client, + config.get("origins", {}).get("items", []), + origins, + default_origin_domain_name, + default_origin_path, + create, + purge_origins, + ) + config["cache_behaviors"] = validation_mgr.validate_cache_behaviors( + config.get("cache_behaviors", {}).get("items", []), + cache_behaviors, + config["origins"], + purge_cache_behaviors, + ) + config["default_cache_behavior"] = validation_mgr.validate_cache_behavior( + config.get("default_cache_behavior", {}), default_cache_behavior, config["origins"], True + ) + config["custom_error_responses"] = validation_mgr.validate_custom_error_responses( + config.get("custom_error_responses", {}).get("items", []), + custom_error_responses, + purge_custom_error_responses, + ) + valid_restrictions = validation_mgr.validate_restrictions( + config.get("restrictions", {}), restrictions, purge_restrictions + ) if valid_restrictions: - config['restrictions'] = valid_restrictions + config["restrictions"] = valid_restrictions valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate) - config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate') + config = merge_validation_into_config(config, valid_viewer_certificate, "viewer_certificate") config = validation_mgr.validate_comment(config, comment) config = snake_dict_to_camel_dict(config, capitalize_first=True) if create: - config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference) + config["CallerReference"] = validation_mgr.validate_caller_reference(caller_reference) result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags or {})) result = camel_dict_to_snake_dict(result) - result['tags'] = list_tags_for_resource(client, module, result['arn']) + result["tags"] = list_tags_for_resource(client, module, result["arn"]) if delete: - if config['Enabled']: - config['Enabled'] = False + if config["Enabled"]: + config["Enabled"] = False result = update_distribution(client, module, config, distribution_id, e_tag) - validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference')) - distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference) + validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get("CallerReference")) + distribution = validation_mgr.validate_distribution_from_aliases_caller_reference( + distribution_id, aliases, caller_reference + ) # e_tag = distribution['ETag'] result = delete_distribution(client, module, distribution) if update: - changed = config != distribution['Distribution']['DistributionConfig'] + changed = config != distribution["Distribution"]["DistributionConfig"] if changed: result = update_distribution(client, module, config, distribution_id, e_tag) else: - result = distribution['Distribution'] - existing_tags = list_tags_for_resource(client, module, result['ARN']) - distribution['Distribution']['DistributionConfig']['tags'] = existing_tags - changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN']) + result = distribution["Distribution"] + existing_tags = list_tags_for_resource(client, module, result["ARN"]) + distribution["Distribution"]["DistributionConfig"]["tags"] = existing_tags + changed |= update_tags(client, module, existing_tags, tags, purge_tags, result["ARN"]) result = camel_dict_to_snake_dict(result) - result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn']) - result['diff'] = dict() - diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config) + result["distribution_config"]["tags"] = config["tags"] = list_tags_for_resource(client, module, result["arn"]) + result["diff"] = dict() + diff = recursive_diff(distribution["Distribution"]["DistributionConfig"], config) if diff: - result['diff']['before'] = diff[0] - result['diff']['after'] = diff[1] + result["diff"]["before"] = diff[0] + result["diff"]["after"] = diff[1] if wait and (create or update): - validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference')) + validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get("CallerReference")) - if 'distribution_config' in result: - result.update(result['distribution_config']) - del result['distribution_config'] + if "distribution_config" in result: + result.update(result["distribution_config"]) + del result["distribution_config"] module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cloudfront_distribution_info.py b/plugins/modules/cloudfront_distribution_info.py index bc6bd8073c9..3bd20868ae5 100644 --- a/plugins/modules/cloudfront_distribution_info.py +++ b/plugins/modules/cloudfront_distribution_info.py @@ -250,8 +250,8 @@ def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases): facts[distribution_id] = details # also have a fixed key for accessing results/details returned - facts['result'] = details - facts['result']['DistributionId'] = distribution_id + facts["result"] = details + facts["result"]["DistributionId"] = distribution_id for alias in aliases: facts[alias] = details @@ -260,78 +260,94 @@ def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, ali def main(): argument_spec = dict( - distribution_id=dict(required=False, type='str'), - invalidation_id=dict(required=False, type='str'), - origin_access_identity_id=dict(required=False, type='str'), - domain_name_alias=dict(required=False, type='str'), - all_lists=dict(required=False, default=False, type='bool'), - distribution=dict(required=False, default=False, type='bool'), - distribution_config=dict(required=False, default=False, type='bool'), - origin_access_identity=dict(required=False, default=False, type='bool'), - origin_access_identity_config=dict(required=False, default=False, type='bool'), - invalidation=dict(required=False, default=False, type='bool'), - streaming_distribution=dict(required=False, default=False, type='bool'), - streaming_distribution_config=dict(required=False, default=False, type='bool'), - list_origin_access_identities=dict(required=False, default=False, type='bool'), - list_distributions=dict(required=False, default=False, type='bool'), - list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'), - list_invalidations=dict(required=False, default=False, type='bool'), - list_streaming_distributions=dict(required=False, default=False, type='bool'), - summary=dict(required=False, default=False, type='bool'), + distribution_id=dict(required=False, type="str"), + invalidation_id=dict(required=False, type="str"), + origin_access_identity_id=dict(required=False, type="str"), + domain_name_alias=dict(required=False, type="str"), + all_lists=dict(required=False, default=False, type="bool"), + distribution=dict(required=False, default=False, type="bool"), + distribution_config=dict(required=False, default=False, type="bool"), + origin_access_identity=dict(required=False, default=False, type="bool"), + origin_access_identity_config=dict(required=False, default=False, type="bool"), + invalidation=dict(required=False, default=False, type="bool"), + streaming_distribution=dict(required=False, default=False, type="bool"), + streaming_distribution_config=dict(required=False, default=False, type="bool"), + list_origin_access_identities=dict(required=False, default=False, type="bool"), + list_distributions=dict(required=False, default=False, type="bool"), + list_distributions_by_web_acl_id=dict(required=False, default=False, type="bool"), + list_invalidations=dict(required=False, default=False, type="bool"), + list_streaming_distributions=dict(required=False, default=False, type="bool"), + summary=dict(required=False, default=False, type="bool"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) service_mgr = CloudFrontFactsServiceManager(module) - distribution_id = module.params.get('distribution_id') - invalidation_id = module.params.get('invalidation_id') - origin_access_identity_id = module.params.get('origin_access_identity_id') - web_acl_id = module.params.get('web_acl_id') - domain_name_alias = module.params.get('domain_name_alias') - all_lists = module.params.get('all_lists') - distribution = module.params.get('distribution') - distribution_config = module.params.get('distribution_config') - origin_access_identity = module.params.get('origin_access_identity') - origin_access_identity_config = module.params.get('origin_access_identity_config') - invalidation = module.params.get('invalidation') - streaming_distribution = module.params.get('streaming_distribution') - streaming_distribution_config = module.params.get('streaming_distribution_config') - list_origin_access_identities = module.params.get('list_origin_access_identities') - list_distributions = module.params.get('list_distributions') - list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id') - list_invalidations = module.params.get('list_invalidations') - list_streaming_distributions = module.params.get('list_streaming_distributions') - summary = module.params.get('summary') + distribution_id = module.params.get("distribution_id") + invalidation_id = module.params.get("invalidation_id") + origin_access_identity_id = module.params.get("origin_access_identity_id") + web_acl_id = module.params.get("web_acl_id") + domain_name_alias = module.params.get("domain_name_alias") + all_lists = module.params.get("all_lists") + distribution = module.params.get("distribution") + distribution_config = module.params.get("distribution_config") + origin_access_identity = module.params.get("origin_access_identity") + origin_access_identity_config = module.params.get("origin_access_identity_config") + invalidation = module.params.get("invalidation") + streaming_distribution = module.params.get("streaming_distribution") + streaming_distribution_config = module.params.get("streaming_distribution_config") + list_origin_access_identities = module.params.get("list_origin_access_identities") + list_distributions = module.params.get("list_distributions") + list_distributions_by_web_acl_id = module.params.get("list_distributions_by_web_acl_id") + list_invalidations = module.params.get("list_invalidations") + list_streaming_distributions = module.params.get("list_streaming_distributions") + summary = module.params.get("summary") aliases = [] - result = {'cloudfront': {}} + result = {"cloudfront": {}} facts = {} - require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or - streaming_distribution_config or list_invalidations) + require_distribution_id = ( + distribution + or distribution_config + or invalidation + or streaming_distribution + or streaming_distribution_config + or list_invalidations + ) # set default to summary if no option specified - summary = summary or not (distribution or distribution_config or origin_access_identity or - origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or - list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or - list_streaming_distributions or list_distributions) + summary = summary or not ( + distribution + or distribution_config + or origin_access_identity + or origin_access_identity_config + or invalidation + or streaming_distribution + or streaming_distribution_config + or list_origin_access_identities + or list_distributions_by_web_acl_id + or list_invalidations + or list_streaming_distributions + or list_distributions + ) # validations if require_distribution_id and distribution_id is None and domain_name_alias is None: - module.fail_json(msg='Error distribution_id or domain_name_alias have not been specified.') - if (invalidation and invalidation_id is None): - module.fail_json(msg='Error invalidation_id has not been specified.') + module.fail_json(msg="Error distribution_id or domain_name_alias have not been specified.") + if invalidation and invalidation_id is None: + module.fail_json(msg="Error invalidation_id has not been specified.") if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None: - module.fail_json(msg='Error origin_access_identity_id has not been specified.') + module.fail_json(msg="Error origin_access_identity_id has not been specified.") if list_distributions_by_web_acl_id and web_acl_id is None: - module.fail_json(msg='Error web_acl_id has not been specified.') + module.fail_json(msg="Error web_acl_id has not been specified.") # get distribution id from domain name alias if require_distribution_id and distribution_id is None: distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias) if not distribution_id: - module.fail_json(msg='Error unable to source a distribution id from domain_name_alias') + module.fail_json(msg="Error unable to source a distribution id from domain_name_alias") # set appropriate cloudfront id if invalidation_id is not None and invalidation: @@ -349,7 +365,9 @@ def main(): if origin_access_identity: facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(id=origin_access_identity_id)) if origin_access_identity_config: - facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(id=origin_access_identity_id)) + facts[origin_access_identity_id].update( + service_mgr.get_origin_access_identity_config(id=origin_access_identity_id) + ) if invalidation: facts_to_set = service_mgr.get_invalidation(distribution_id=distribution_id, id=invalidation_id) facts[invalidation_id].update(facts_to_set) @@ -359,32 +377,32 @@ def main(): facts_to_set = service_mgr.get_streaming_distribution_config(id=distribution_id) if list_invalidations: invalidations = service_mgr.list_invalidations(distribution_id=distribution_id) or {} - facts_to_set = {'invalidations': invalidations} - if 'facts_to_set' in vars(): + facts_to_set = {"invalidations": invalidations} + if "facts_to_set" in vars(): aliases = service_mgr.get_aliases_from_distribution_id(distribution_id) facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases) # get list based on options if all_lists or list_origin_access_identities: - facts['origin_access_identities'] = service_mgr.list_origin_access_identities() or {} + facts["origin_access_identities"] = service_mgr.list_origin_access_identities() or {} if all_lists or list_distributions: - facts['distributions'] = service_mgr.list_distributions() or {} + facts["distributions"] = service_mgr.list_distributions() or {} if all_lists or list_streaming_distributions: - facts['streaming_distributions'] = service_mgr.list_streaming_distributions() or {} + facts["streaming_distributions"] = service_mgr.list_streaming_distributions() or {} if list_distributions_by_web_acl_id: - facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id=web_acl_id) or {} + facts["distributions_by_web_acl_id"] = service_mgr.list_distributions_by_web_acl_id(web_acl_id=web_acl_id) or {} if list_invalidations: - facts['invalidations'] = service_mgr.list_invalidations(distribution_id=distribution_id) or {} + facts["invalidations"] = service_mgr.list_invalidations(distribution_id=distribution_id) or {} # default summary option if summary: - facts['summary'] = service_mgr.summary() + facts["summary"] = service_mgr.summary() - result['changed'] = False - result['cloudfront'].update(facts) + result["changed"] = False + result["cloudfront"].update(facts) module.exit_json(msg="Retrieved CloudFront info.", **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cloudfront_invalidation.py b/plugins/modules/cloudfront_invalidation.py index adee5058b17..b98b56be2d2 100644 --- a/plugins/modules/cloudfront_invalidation.py +++ b/plugins/modules/cloudfront_invalidation.py @@ -152,24 +152,33 @@ class CloudFrontInvalidationServiceManager(object): def __init__(self, module, cloudfront_facts_mgr): self.module = module - self.client = module.client('cloudfront') + self.client = module.client("cloudfront") self.__cloudfront_facts_mgr = cloudfront_facts_mgr def create_invalidation(self, distribution_id, invalidation_batch): - current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference']) + current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch["CallerReference"]) try: - response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch) - response.pop('ResponseMetadata', None) + response = self.client.create_invalidation( + DistributionId=distribution_id, InvalidationBatch=invalidation_batch + ) + response.pop("ResponseMetadata", None) if current_invalidation_response: return response, False else: return response, True - except is_boto3_error_message('Your request contains a caller reference that was used for a previous invalidation ' - 'batch for the same distribution.'): - self.module.warn("InvalidationBatch target paths are not modifiable. " - "To make a new invalidation please update caller_reference.") + except is_boto3_error_message( + "Your request contains a caller reference that was used for a previous invalidation " + "batch for the same distribution." + ): + self.module.warn( + "InvalidationBatch target paths are not modifiable. " + "To make a new invalidation please update caller_reference." + ) return current_invalidation_response, False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") def get_invalidation(self, distribution_id, caller_reference): @@ -178,9 +187,11 @@ def get_invalidation(self, distribution_id, caller_reference): # check if there is an invalidation with the same caller reference for invalidation in invalidations: - invalidation_info = self.__cloudfront_facts_mgr.get_invalidation(distribution_id=distribution_id, id=invalidation['Id']) - if invalidation_info.get('InvalidationBatch', {}).get('CallerReference') == caller_reference: - invalidation_info.pop('ResponseMetadata', None) + invalidation_info = self.__cloudfront_facts_mgr.get_invalidation( + distribution_id=distribution_id, id=invalidation["Id"] + ) + if invalidation_info.get("InvalidationBatch", {}).get("CallerReference") == caller_reference: + invalidation_info.pop("ResponseMetadata", None) return invalidation_info return {} @@ -217,8 +228,8 @@ def validate_invalidation_batch(self, invalidation_batch, caller_reference): else: valid_caller_reference = datetime.datetime.now().isoformat() valid_invalidation_batch = { - 'paths': self.create_aws_list(invalidation_batch), - 'caller_reference': valid_caller_reference + "paths": self.create_aws_list(invalidation_batch), + "caller_reference": valid_caller_reference, } return valid_invalidation_batch except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -230,19 +241,21 @@ def main(): caller_reference=dict(), distribution_id=dict(), alias=dict(), - target_paths=dict(required=True, type='list', elements='str') + target_paths=dict(required=True, type="list", elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[["distribution_id", "alias"]] + ) cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) validation_mgr = CloudFrontInvalidationValidationManager(module, cloudfront_facts_mgr) service_mgr = CloudFrontInvalidationServiceManager(module, cloudfront_facts_mgr) - caller_reference = module.params.get('caller_reference') - distribution_id = module.params.get('distribution_id') - alias = module.params.get('alias') - target_paths = module.params.get('target_paths') + caller_reference = module.params.get("caller_reference") + distribution_id = module.params.get("distribution_id") + alias = module.params.get("alias") + target_paths = module.params.get("target_paths") result = {} @@ -254,5 +267,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cloudfront_origin_access_identity.py b/plugins/modules/cloudfront_origin_access_identity.py index 1da411f8677..3c9340df611 100644 --- a/plugins/modules/cloudfront_origin_access_identity.py +++ b/plugins/modules/cloudfront_origin_access_identity.py @@ -136,15 +136,12 @@ class CloudFrontOriginAccessIdentityServiceManager(object): def __init__(self, module): self.module = module - self.client = module.client('cloudfront') + self.client = module.client("cloudfront") def create_origin_access_identity(self, caller_reference, comment): try: return self.client.create_cloud_front_origin_access_identity( - CloudFrontOriginAccessIdentityConfig={ - 'CallerReference': caller_reference, - 'Comment': comment - } + CloudFrontOriginAccessIdentityConfig={"CallerReference": caller_reference, "Comment": comment} ) except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error creating cloud front origin access identity.") @@ -158,14 +155,12 @@ def delete_origin_access_identity(self, origin_access_identity_id, e_tag): def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag): changed = False - new_config = { - 'CallerReference': caller_reference, - 'Comment': comment - } + new_config = {"CallerReference": caller_reference, "Comment": comment} try: - current_config = self.client.get_cloud_front_origin_access_identity_config( - Id=origin_access_identity_id)['CloudFrontOriginAccessIdentityConfig'] + current_config = self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id)[ + "CloudFrontOriginAccessIdentityConfig" + ] except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error getting Origin Access Identity config.") @@ -197,8 +192,10 @@ def __init__(self, module): def describe_origin_access_identity(self, origin_access_identity_id, fail_if_missing=True): try: - return self.__cloudfront_facts_mgr.get_origin_access_identity(id=origin_access_identity_id, fail_if_error=False) - except is_boto3_error_code('NoSuchCloudFrontOriginAccessIdentity') as e: # pylint: disable=duplicate-except + return self.__cloudfront_facts_mgr.get_origin_access_identity( + id=origin_access_identity_id, fail_if_error=False + ) + except is_boto3_error_code("NoSuchCloudFrontOriginAccessIdentity") as e: # pylint: disable=duplicate-except if fail_if_missing: self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.") return {} @@ -208,36 +205,39 @@ def describe_origin_access_identity(self, origin_access_identity_id, fail_if_mis def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id, fail_if_missing): oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing) if oai is not None: - return oai.get('ETag') + return oai.get("ETag") - def validate_origin_access_identity_id_from_caller_reference( - self, caller_reference): + def validate_origin_access_identity_id_from_caller_reference(self, caller_reference): origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities() - origin_origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities] + origin_origin_access_identity_ids = [oai.get("Id") for oai in origin_access_identities] for origin_access_identity_id in origin_origin_access_identity_ids: - oai_config = (self.__cloudfront_facts_mgr.get_origin_access_identity_config(id=origin_access_identity_id)) - temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference') + oai_config = self.__cloudfront_facts_mgr.get_origin_access_identity_config(id=origin_access_identity_id) + temp_caller_reference = oai_config.get("CloudFrontOriginAccessIdentityConfig").get("CallerReference") if temp_caller_reference == caller_reference: return origin_access_identity_id def validate_comment(self, comment): if comment is None: - return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime( + "%Y-%m-%dT%H:%M:%S.%f" + ) return comment def validate_caller_reference_from_origin_access_identity_id(self, origin_access_identity_id, caller_reference): if caller_reference is None: if origin_access_identity_id is None: - return datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f") oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing=True) - origin_access_config = oai.get('CloudFrontOriginAccessIdentity', {}).get('CloudFrontOriginAccessIdentityConfig', {}) - return origin_access_config.get('CallerReference') + origin_access_config = oai.get("CloudFrontOriginAccessIdentity", {}).get( + "CloudFrontOriginAccessIdentityConfig", {} + ) + return origin_access_config.get("CallerReference") return caller_reference def main(): argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), + state=dict(choices=["present", "absent"], default="present"), origin_access_identity_id=dict(), caller_reference=dict(), comment=dict(), @@ -251,22 +251,28 @@ def main(): service_mgr = CloudFrontOriginAccessIdentityServiceManager(module) validation_mgr = CloudFrontOriginAccessIdentityValidationManager(module) - state = module.params.get('state') - caller_reference = module.params.get('caller_reference') + state = module.params.get("state") + caller_reference = module.params.get("caller_reference") - comment = module.params.get('comment') - origin_access_identity_id = module.params.get('origin_access_identity_id') + comment = module.params.get("comment") + origin_access_identity_id = module.params.get("origin_access_identity_id") if origin_access_identity_id is None and caller_reference is not None: - origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(caller_reference) + origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference( + caller_reference + ) - if state == 'present': + if state == "present": comment = validation_mgr.validate_comment(comment) - caller_reference = validation_mgr.validate_caller_reference_from_origin_access_identity_id(origin_access_identity_id, caller_reference) + caller_reference = validation_mgr.validate_caller_reference_from_origin_access_identity_id( + origin_access_identity_id, caller_reference + ) if origin_access_identity_id is not None: e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id, True) # update cloudfront origin access identity - result, changed = service_mgr.update_origin_access_identity(caller_reference, comment, origin_access_identity_id, e_tag) + result, changed = service_mgr.update_origin_access_identity( + caller_reference, comment, origin_access_identity_id, e_tag + ) else: # create cloudfront origin access identity result = service_mgr.create_origin_access_identity(caller_reference, comment) @@ -276,10 +282,10 @@ def main(): if e_tag: result, changed = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag) - result.pop('ResponseMetadata', None) + result.pop("ResponseMetadata", None) module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cloudfront_response_headers_policy.py b/plugins/modules/cloudfront_response_headers_policy.py index c84346c387a..a7558e8a86d 100644 --- a/plugins/modules/cloudfront_response_headers_policy.py +++ b/plugins/modules/cloudfront_response_headers_policy.py @@ -155,21 +155,20 @@ class CloudfrontResponseHeadersPolicyService(object): - def __init__(self, module): self.module = module - self.client = module.client('cloudfront') + self.client = module.client("cloudfront") self.check_mode = module.check_mode def find_response_headers_policy(self, name): try: - policies = self.client.list_response_headers_policies()['ResponseHeadersPolicyList']['Items'] + policies = self.client.list_response_headers_policies()["ResponseHeadersPolicyList"]["Items"] for policy in policies: - if policy['ResponseHeadersPolicy']['ResponseHeadersPolicyConfig']['Name'] == name: - policy_id = policy['ResponseHeadersPolicy']['Id'] + if policy["ResponseHeadersPolicy"]["ResponseHeadersPolicyConfig"]["Name"] == name: + policy_id = policy["ResponseHeadersPolicy"]["Id"] # as the list_ request does not contain the Etag (which we need), we need to do another get_ request here - matching_policy = self.client.get_response_headers_policy(Id=policy['ResponseHeadersPolicy']['Id']) + matching_policy = self.client.get_response_headers_policy(Id=policy["ResponseHeadersPolicy"]["Id"]) break else: matching_policy = None @@ -183,17 +182,17 @@ def create_response_header_policy(self, name, comment, cors_config, security_hea security_headers_config = snake_dict_to_camel_dict(security_headers_config, capitalize_first=True) # Little helper for turning xss_protection into XSSProtection and not into XssProtection - if 'XssProtection' in security_headers_config: - security_headers_config['XSSProtection'] = security_headers_config.pop('XssProtection') + if "XssProtection" in security_headers_config: + security_headers_config["XSSProtection"] = security_headers_config.pop("XssProtection") custom_headers_config = snake_dict_to_camel_dict(custom_headers_config, capitalize_first=True) config = { - 'Name': name, - 'Comment': comment, - 'CorsConfig': self.insert_quantities(cors_config), - 'SecurityHeadersConfig': security_headers_config, - 'CustomHeadersConfig': self.insert_quantities(custom_headers_config) + "Name": name, + "Comment": comment, + "CorsConfig": self.insert_quantities(cors_config), + "SecurityHeadersConfig": security_headers_config, + "CustomHeadersConfig": self.insert_quantities(custom_headers_config), } config = {k: v for k, v in config.items() if v} @@ -212,14 +211,16 @@ def create_response_header_policy(self, name, comment, cors_config, security_hea except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error creating policy") else: - policy_id = matching_policy['ResponseHeadersPolicy']['Id'] - etag = matching_policy['ETag'] + policy_id = matching_policy["ResponseHeadersPolicy"]["Id"] + etag = matching_policy["ETag"] try: - result = self.client.update_response_headers_policy(Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config) + result = self.client.update_response_headers_policy( + Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config + ) - changed_time = result['ResponseHeadersPolicy']['LastModifiedTime'] + changed_time = result["ResponseHeadersPolicy"]["LastModifiedTime"] seconds = 3 # threshhold for returned timestamp age - seconds_ago = (datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds)) + seconds_ago = datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds) # consider change made by this execution of the module if returned timestamp was very recent if changed_time > seconds_ago: @@ -235,8 +236,8 @@ def delete_response_header_policy(self, name): if matching_policy is None: self.module.exit_json(msg="Didn't find a matching policy by that name, not deleting") else: - policy_id = matching_policy['ResponseHeadersPolicy']['Id'] - etag = matching_policy['ETag'] + policy_id = matching_policy["ResponseHeadersPolicy"]["Id"] + etag = matching_policy["ETag"] if self.check_mode: result = {} else: @@ -251,43 +252,45 @@ def delete_response_header_policy(self, name): @staticmethod def insert_quantities(dict_with_items): # Items on top level case - if 'Items' in dict_with_items and isinstance(dict_with_items['Items'], list): - dict_with_items['Quantity'] = len(dict_with_items['Items']) + if "Items" in dict_with_items and isinstance(dict_with_items["Items"], list): + dict_with_items["Quantity"] = len(dict_with_items["Items"]) # Items on second level case for k, v in dict_with_items.items(): - if isinstance(v, dict) and 'Items' in v: - v['Quantity'] = len(v['Items']) + if isinstance(v, dict) and "Items" in v: + v["Quantity"] = len(v["Items"]) return dict_with_items def main(): argument_spec = dict( - name=dict(required=True, type='str'), - comment=dict(type='str'), - cors_config=dict(type='dict', default=dict()), - security_headers_config=dict(type='dict', default=dict()), - custom_headers_config=dict(type='dict', default=dict()), - state=dict(choices=['present', 'absent'], type='str', default='present'), + name=dict(required=True, type="str"), + comment=dict(type="str"), + cors_config=dict(type="dict", default=dict()), + security_headers_config=dict(type="dict", default=dict()), + custom_headers_config=dict(type="dict", default=dict()), + state=dict(choices=["present", "absent"], type="str", default="present"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - name = module.params.get('name') - comment = module.params.get('comment', '') - cors_config = module.params.get('cors_config') - security_headers_config = module.params.get('security_headers_config') - custom_headers_config = module.params.get('custom_headers_config') - state = module.params.get('state') + name = module.params.get("name") + comment = module.params.get("comment", "") + cors_config = module.params.get("cors_config") + security_headers_config = module.params.get("security_headers_config") + custom_headers_config = module.params.get("custom_headers_config") + state = module.params.get("state") service = CloudfrontResponseHeadersPolicyService(module) - if state == 'absent': + if state == "absent": service.delete_response_header_policy(name) else: - service.create_response_header_policy(name, comment, cors_config, security_headers_config, custom_headers_config) + service.create_response_header_policy( + name, comment, cors_config, security_headers_config, custom_headers_config + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/codebuild_project.py b/plugins/modules/codebuild_project.py index 71f05bf7233..6a910799d88 100644 --- a/plugins/modules/codebuild_project.py +++ b/plugins/modules/codebuild_project.py @@ -308,17 +308,15 @@ class CodeBuildAnsibleAWSError(AnsibleAWSError): def do_create_project(client, params, formatted_params): - if params["source"] is None or params["artifacts"] is None: raise CodeBuildAnsibleAWSError( message="The source and artifacts parameters must be provided " - "when creating a new project. No existing project was found.") + "when creating a new project. No existing project was found." + ) if params["tags"] is not None: formatted_params["tags"] = ansible_dict_to_boto3_tag_list( - params["tags"], - tag_name_key_name="key", - tag_value_key_name="value" + params["tags"], tag_name_key_name="key", tag_value_key_name="value" ) permitted_create_params = get_boto3_client_method_parameters(client, "create_project") @@ -357,7 +355,7 @@ def do_update_project(client, params, formatted_params, found_project): permitted_update_params = get_boto3_client_method_parameters(client, "update_project") formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params) - found_tags = found_project.pop('tags', []) + found_tags = found_project.pop("tags", []) if params["tags"] is not None: formatted_update_params["tags"] = format_tags( merge_tags(found_tags, params["tags"], params["purge_tags"]), @@ -373,7 +371,7 @@ def do_update_project(client, params, formatted_params, found_project): found_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(found_tags) updated_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(updated_tags) - changed = (updated_project != found_project) + changed = updated_project != found_project updated_project["tags"] = updated_tags return resp, changed @@ -381,7 +379,7 @@ def do_update_project(client, params, formatted_params, found_project): def create_or_update_project(client, params): resp = {} - name = params['name'] + name = params["name"] # clean up params formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None)) @@ -426,7 +424,7 @@ def delete_project(client, name): def describe_project(client, name): project = {} try: - projects = client.batch_get_projects(names=[name])['projects'] + projects = client.batch_get_projects(names=[name])["projects"] if len(projects) > 0: project = projects[0] return project @@ -439,11 +437,11 @@ def describe_project(client, name): def format_project_result(project_result): formated_result = camel_dict_to_snake_dict(project_result) - project = project_result.get('project', {}) + project = project_result.get("project", {}) if project: - tags = project.get('tags', []) - formated_result['project']['resource_tags'] = boto3_tag_list_to_ansible_dict(tags) - formated_result['ORIGINAL'] = project_result + tags = project.get("tags", []) + formated_result["project"]["resource_tags"] = boto3_tag_list_to_ansible_dict(tags) + formated_result["ORIGINAL"] = project_result return formated_result @@ -451,35 +449,35 @@ def main(): argument_spec = dict( name=dict(required=True), description=dict(), - source=dict(type='dict'), - artifacts=dict(type='dict'), - cache=dict(type='dict'), - environment=dict(type='dict'), + source=dict(type="dict"), + artifacts=dict(type="dict"), + cache=dict(type="dict"), + environment=dict(type="dict"), service_role=dict(), - timeout_in_minutes=dict(type='int', default=60), + timeout_in_minutes=dict(type="int", default=60), encryption_key=dict(no_log=False), - tags=dict(type='dict', aliases=["resource_tags"]), - purge_tags=dict(type='bool', default=True), - vpc_config=dict(type='dict'), - state=dict(choices=['present', 'absent'], default='present') + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + vpc_config=dict(type="dict"), + state=dict(choices=["present", "absent"], default="present"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - client_conn = module.client('codebuild') + client_conn = module.client("codebuild") - state = module.params.get('state') + state = module.params.get("state") changed = False try: - if state == 'present': + if state == "present": project_result, changed = create_or_update_project( client=client_conn, params=module.params, ) - elif state == 'absent': + elif state == "absent": project_result, changed = delete_project( client=client_conn, - name=module.params['name'], + name=module.params["name"], ) except CodeBuildAnsibleAWSError as e: if e.exception: @@ -490,5 +488,5 @@ def main(): module.exit_json(changed=changed, **formatted_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/codecommit_repository.py b/plugins/modules/codecommit_repository.py index 1552738bea5..14b08bd88a9 100644 --- a/plugins/modules/codecommit_repository.py +++ b/plugins/modules/codecommit_repository.py @@ -145,39 +145,39 @@ class CodeCommit(object): def __init__(self, module=None): self._module = module - self._client = self._module.client('codecommit') + self._client = self._module.client("codecommit") self._check_mode = self._module.check_mode def process(self): result = dict(changed=False) - if self._module.params['state'] == 'present': + if self._module.params["state"] == "present": if not self._repository_exists(): if not self._check_mode: result = self._create_repository() - result['changed'] = True + result["changed"] = True else: - metadata = self._get_repository()['repositoryMetadata'] - if not metadata.get('repositoryDescription'): - metadata['repositoryDescription'] = '' - if metadata['repositoryDescription'] != self._module.params['description']: + metadata = self._get_repository()["repositoryMetadata"] + if not metadata.get("repositoryDescription"): + metadata["repositoryDescription"] = "" + if metadata["repositoryDescription"] != self._module.params["description"]: if not self._check_mode: self._update_repository() - result['changed'] = True + result["changed"] = True result.update(self._get_repository()) - if self._module.params['state'] == 'absent' and self._repository_exists(): + if self._module.params["state"] == "absent" and self._repository_exists(): if not self._check_mode: result = self._delete_repository() - result['changed'] = True + result["changed"] = True return result def _repository_exists(self): try: - paginator = self._client.get_paginator('list_repositories') + paginator = self._client.get_paginator("list_repositories") for page in paginator.paginate(): - repositories = page['repositories'] + repositories = page["repositories"] for item in repositories: - if self._module.params['name'] in item.values(): + if self._module.params["name"] in item.values(): return True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't get repository") @@ -186,7 +186,7 @@ def _repository_exists(self): def _get_repository(self): try: result = self._client.get_repository( - repositoryName=self._module.params['name'] + repositoryName=self._module.params["name"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't get repository") @@ -195,8 +195,8 @@ def _get_repository(self): def _update_repository(self): try: result = self._client.update_repository_description( - repositoryName=self._module.params['name'], - repositoryDescription=self._module.params['description'] + repositoryName=self._module.params["name"], + repositoryDescription=self._module.params["description"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't create repository") @@ -205,8 +205,8 @@ def _update_repository(self): def _create_repository(self): try: result = self._client.create_repository( - repositoryName=self._module.params['name'], - repositoryDescription=self._module.params['description'] + repositoryName=self._module.params["name"], + repositoryDescription=self._module.params["description"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't create repository") @@ -215,7 +215,7 @@ def _create_repository(self): def _delete_repository(self): try: result = self._client.delete_repository( - repositoryName=self._module.params['name'] + repositoryName=self._module.params["name"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't delete repository") @@ -225,13 +225,13 @@ def _delete_repository(self): def main(): argument_spec = dict( name=dict(required=True), - state=dict(choices=['present', 'absent'], required=True), - description=dict(default='', aliases=['comment']) + state=dict(choices=["present", "absent"], required=True), + description=dict(default="", aliases=["comment"]), ) ansible_aws_module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) aws_codecommit = CodeCommit(module=ansible_aws_module) @@ -239,5 +239,5 @@ def main(): ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/codepipeline.py b/plugins/modules/codepipeline.py index a2ec7713b4a..7e0baf3fd65 100644 --- a/plugins/modules/codepipeline.py +++ b/plugins/modules/codepipeline.py @@ -209,14 +209,14 @@ def create_pipeline(client, name, role_arn, artifact_store, stages, version, module): - pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages} + pipeline_dict = {"name": name, "roleArn": role_arn, "artifactStore": artifact_store, "stages": stages} if version: - pipeline_dict['version'] = version + pipeline_dict["version"] = version try: resp = client.create_pipeline(pipeline=pipeline_dict) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable create pipeline {0}".format(pipeline_dict['name'])) + module.fail_json_aws(e, msg="Unable create pipeline {0}".format(pipeline_dict["name"])) def update_pipeline(client, pipeline_dict, module): @@ -224,7 +224,7 @@ def update_pipeline(client, pipeline_dict, module): resp = client.update_pipeline(pipeline=pipeline_dict) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable update pipeline {0}".format(pipeline_dict['name'])) + module.fail_json_aws(e, msg="Unable update pipeline {0}".format(pipeline_dict["name"])) def delete_pipeline(client, name, module): @@ -244,63 +244,69 @@ def describe_pipeline(client, name, version, module): else: pipeline = client.get_pipeline(name=name) return pipeline - except is_boto3_error_code('PipelineNotFoundException'): + except is_boto3_error_code("PipelineNotFoundException"): return pipeline - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def main(): argument_spec = dict( - name=dict(required=True, type='str'), - role_arn=dict(required=True, type='str'), - artifact_store=dict(required=True, type='dict'), - stages=dict(required=True, type='list', elements='dict'), - version=dict(type='int'), - state=dict(choices=['present', 'absent'], default='present') + name=dict(required=True, type="str"), + role_arn=dict(required=True, type="str"), + artifact_store=dict(required=True, type="dict"), + stages=dict(required=True, type="list", elements="dict"), + version=dict(type="int"), + state=dict(choices=["present", "absent"], default="present"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - client_conn = module.client('codepipeline') + client_conn = module.client("codepipeline") - state = module.params.get('state') + state = module.params.get("state") changed = False # Determine if the CodePipeline exists - found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module) + found_code_pipeline = describe_pipeline( + client=client_conn, name=module.params["name"], version=module.params["version"], module=module + ) pipeline_result = {} - if state == 'present': - if 'pipeline' in found_code_pipeline: - pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline']) + if state == "present": + if "pipeline" in found_code_pipeline: + pipeline_dict = copy.deepcopy(found_code_pipeline["pipeline"]) # Update dictionary with provided module params: - pipeline_dict['roleArn'] = module.params['role_arn'] - pipeline_dict['artifactStore'] = module.params['artifact_store'] - pipeline_dict['stages'] = module.params['stages'] - if module.params['version'] is not None: - pipeline_dict['version'] = module.params['version'] + pipeline_dict["roleArn"] = module.params["role_arn"] + pipeline_dict["artifactStore"] = module.params["artifact_store"] + pipeline_dict["stages"] = module.params["stages"] + if module.params["version"] is not None: + pipeline_dict["version"] = module.params["version"] pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module) - if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']): + if compare_policies(found_code_pipeline["pipeline"], pipeline_result["pipeline"]): changed = True else: pipeline_result = create_pipeline( client=client_conn, - name=module.params['name'], - role_arn=module.params['role_arn'], - artifact_store=module.params['artifact_store'], - stages=module.params['stages'], - version=module.params['version'], - module=module) + name=module.params["name"], + role_arn=module.params["role_arn"], + artifact_store=module.params["artifact_store"], + stages=module.params["stages"], + version=module.params["version"], + module=module, + ) changed = True - elif state == 'absent': + elif state == "absent": if found_code_pipeline: - pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module) + pipeline_result = delete_pipeline(client=client_conn, name=module.params["name"], module=module) changed = True module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/config_aggregation_authorization.py b/plugins/modules/config_aggregation_authorization.py index 96f1eb1d9cd..903d5a5e1fe 100644 --- a/plugins/modules/config_aggregation_authorization.py +++ b/plugins/modules/config_aggregation_authorization.py @@ -62,10 +62,10 @@ def resource_exists(client, module, params): try: - current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations'] + current_authorizations = client.describe_aggregation_authorizations()["AggregationAuthorizations"] authorization_exists = next( - (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']), - None + (item for item in current_authorizations if item["AuthorizedAccountId"] == params["AuthorizedAccountId"]), + None, ) if authorization_exists: return True @@ -76,32 +76,32 @@ def resource_exists(client, module, params): def create_resource(client, module, params, result): try: response = client.put_aggregation_authorization( - AuthorizedAccountId=params['AuthorizedAccountId'], - AuthorizedAwsRegion=params['AuthorizedAwsRegion'] + AuthorizedAccountId=params["AuthorizedAccountId"], + AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) - result['changed'] = True + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization") def update_resource(client, module, params, result): - current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations'] + current_authorizations = client.describe_aggregation_authorizations()["AggregationAuthorizations"] current_params = next( - (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']), - None + (item for item in current_authorizations if item["AuthorizedAccountId"] == params["AuthorizedAccountId"]), + None, ) - del current_params['AggregationAuthorizationArn'] - del current_params['CreationTime'] + del current_params["AggregationAuthorizationArn"] + del current_params["CreationTime"] if params != current_params: try: response = client.put_aggregation_authorization( - AuthorizedAccountId=params['AuthorizedAccountId'], - AuthorizedAwsRegion=params['AuthorizedAwsRegion'] + AuthorizedAccountId=params["AuthorizedAccountId"], + AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) - result['changed'] = True + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization") @@ -110,10 +110,10 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: response = client.delete_aggregation_authorization( - AuthorizedAccountId=params['AuthorizedAccountId'], - AuthorizedAwsRegion=params['AuthorizedAwsRegion'] + AuthorizedAccountId=params["AuthorizedAccountId"], + AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) - result['changed'] = True + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization") @@ -122,35 +122,35 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'authorized_account_id': dict(type='str', required=True), - 'authorized_aws_region': dict(type='str', required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "authorized_account_id": dict(type="str", required=True), + "authorized_aws_region": dict(type="str", required=True), }, supports_check_mode=False, ) - result = {'changed': False} + result = {"changed": False} params = { - 'AuthorizedAccountId': module.params.get('authorized_account_id'), - 'AuthorizedAwsRegion': module.params.get('authorized_aws_region'), + "AuthorizedAccountId": module.params.get("authorized_account_id"), + "AuthorizedAwsRegion": module.params.get("authorized_aws_region"), } - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if module.params.get('state') == 'present': + if module.params.get("state") == "present": if not resource_status: create_resource(client, module, params, result) else: update_resource(client, module, params, result) - if module.params.get('state') == 'absent': + if module.params.get("state") == "absent": if resource_status: delete_resource(client, module, params, result) - module.exit_json(changed=result['changed']) + module.exit_json(changed=result["changed"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/config_aggregator.py b/plugins/modules/config_aggregator.py index 38271fc4542..58866159028 100644 --- a/plugins/modules/config_aggregator.py +++ b/plugins/modules/config_aggregator.py @@ -105,50 +105,53 @@ def resource_exists(client, module, params): try: aggregator = client.describe_configuration_aggregators( - ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']] + ConfigurationAggregatorNames=[params["ConfigurationAggregatorName"]] ) - return aggregator['ConfigurationAggregators'][0] - except is_boto3_error_code('NoSuchConfigurationAggregatorException'): + return aggregator["ConfigurationAggregators"][0] + except is_boto3_error_code("NoSuchConfigurationAggregatorException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def create_resource(client, module, params, result): try: client.put_configuration_aggregator( - ConfigurationAggregatorName=params['ConfigurationAggregatorName'], - AccountAggregationSources=params['AccountAggregationSources'], - OrganizationAggregationSource=params['OrganizationAggregationSource'] + ConfigurationAggregatorName=params["ConfigurationAggregatorName"], + AccountAggregationSources=params["AccountAggregationSources"], + OrganizationAggregationSource=params["OrganizationAggregationSource"], ) - result['changed'] = True - result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["changed"] = True + result["aggregator"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator") def update_resource(client, module, params, result): - result['changed'] = False + result["changed"] = False current_params = client.describe_configuration_aggregators( - ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']] - )['ConfigurationAggregators'][0] + ConfigurationAggregatorNames=[params["ConfigurationAggregatorName"]] + )["ConfigurationAggregators"][0] - if params['AccountAggregationSources'] != current_params.get('AccountAggregationSources', []): - result['changed'] = True + if params["AccountAggregationSources"] != current_params.get("AccountAggregationSources", []): + result["changed"] = True - if params['OrganizationAggregationSource'] != current_params.get('OrganizationAggregationSource', {}): - result['changed'] = True + if params["OrganizationAggregationSource"] != current_params.get("OrganizationAggregationSource", {}): + result["changed"] = True - if result['changed']: + if result["changed"]: try: client.put_configuration_aggregator( - ConfigurationAggregatorName=params['ConfigurationAggregatorName'], - AccountAggregationSources=params['AccountAggregationSources'], - OrganizationAggregationSource=params['OrganizationAggregationSource'] + ConfigurationAggregatorName=params["ConfigurationAggregatorName"], + AccountAggregationSources=params["AccountAggregationSources"], + OrganizationAggregationSource=params["OrganizationAggregationSource"], ) - result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["aggregator"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator") @@ -156,10 +159,8 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: - client.delete_configuration_aggregator( - ConfigurationAggregatorName=params['ConfigurationAggregatorName'] - ) - result['changed'] = True + client.delete_configuration_aggregator(ConfigurationAggregatorName=params["ConfigurationAggregatorName"]) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator") @@ -168,66 +169,64 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'account_sources': dict(type='list', required=True, elements='dict'), - 'organization_source': dict(type='dict', required=True) + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "account_sources": dict(type="list", required=True, elements="dict"), + "organization_source": dict(type="dict", required=True), }, supports_check_mode=False, ) - result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") params = {} if name: - params['ConfigurationAggregatorName'] = name - params['AccountAggregationSources'] = [] - if module.params.get('account_sources'): - for i in module.params.get('account_sources'): + params["ConfigurationAggregatorName"] = name + params["AccountAggregationSources"] = [] + if module.params.get("account_sources"): + for i in module.params.get("account_sources"): tmp_dict = {} - if i.get('account_ids'): - tmp_dict['AccountIds'] = i.get('account_ids') - if i.get('aws_regions'): - tmp_dict['AwsRegions'] = i.get('aws_regions') - if i.get('all_aws_regions') is not None: - tmp_dict['AllAwsRegions'] = i.get('all_aws_regions') - params['AccountAggregationSources'].append(tmp_dict) - if module.params.get('organization_source'): - params['OrganizationAggregationSource'] = {} - if module.params.get('organization_source').get('role_arn'): - params['OrganizationAggregationSource'].update({ - 'RoleArn': module.params.get('organization_source').get('role_arn') - }) - if module.params.get('organization_source').get('aws_regions'): - params['OrganizationAggregationSource'].update({ - 'AwsRegions': module.params.get('organization_source').get('aws_regions') - }) - if module.params.get('organization_source').get('all_aws_regions') is not None: - params['OrganizationAggregationSource'].update({ - 'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions') - }) - - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + if i.get("account_ids"): + tmp_dict["AccountIds"] = i.get("account_ids") + if i.get("aws_regions"): + tmp_dict["AwsRegions"] = i.get("aws_regions") + if i.get("all_aws_regions") is not None: + tmp_dict["AllAwsRegions"] = i.get("all_aws_regions") + params["AccountAggregationSources"].append(tmp_dict) + if module.params.get("organization_source"): + params["OrganizationAggregationSource"] = {} + if module.params.get("organization_source").get("role_arn"): + params["OrganizationAggregationSource"].update( + {"RoleArn": module.params.get("organization_source").get("role_arn")} + ) + if module.params.get("organization_source").get("aws_regions"): + params["OrganizationAggregationSource"].update( + {"AwsRegions": module.params.get("organization_source").get("aws_regions")} + ) + if module.params.get("organization_source").get("all_aws_regions") is not None: + params["OrganizationAggregationSource"].update( + {"AllAwsRegions": module.params.get("organization_source").get("all_aws_regions")} + ) + + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if state == 'present': + if state == "present": if not resource_status: create_resource(client, module, params, result) else: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if resource_status: delete_resource(client, module, params, result) - module.exit_json(changed=result['changed']) + module.exit_json(changed=result["changed"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/config_delivery_channel.py b/plugins/modules/config_delivery_channel.py index 2dd5fbc68d2..aae8799de20 100644 --- a/plugins/modules/config_delivery_channel.py +++ b/plugins/modules/config_delivery_channel.py @@ -80,20 +80,23 @@ # this waits for an IAM role to become fully available, at the cost of # taking a long time to fail when the IAM role/policy really is invalid retry_unavailable_iam_on_put_delivery = AWSRetry.jittered_backoff( - catch_extra_error_codes=['InsufficientDeliveryPolicyException'], + catch_extra_error_codes=["InsufficientDeliveryPolicyException"], ) def resource_exists(client, module, params): try: channel = client.describe_delivery_channels( - DeliveryChannelNames=[params['name']], + DeliveryChannelNames=[params["name"]], aws_retry=True, ) - return channel['DeliveryChannels'][0] - except is_boto3_error_code('NoSuchDeliveryChannelException'): + return channel["DeliveryChannels"][0] + except is_boto3_error_code("NoSuchDeliveryChannelException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) @@ -104,49 +107,64 @@ def create_resource(client, module, params, result): )( DeliveryChannel=params, ) - result['changed'] = True - result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["changed"] = True + result["channel"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result - except is_boto3_error_code('InvalidS3KeyPrefixException') as e: - module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix") - except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. " - "Make sure the bucket exists and is available") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") + except is_boto3_error_code("InvalidS3KeyPrefixException") as e: + module.fail_json_aws( + e, + msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix", + ) + except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="The `s3_prefix` or `s3_bucket` parameter is invalid. Make sure the bucket exists and is available", + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="Couldn't create AWS Config delivery channel", + ) def update_resource(client, module, params, result): current_params = client.describe_delivery_channels( - DeliveryChannelNames=[params['name']], + DeliveryChannelNames=[params["name"]], aws_retry=True, ) - if params != current_params['DeliveryChannels'][0]: + if params != current_params["DeliveryChannels"][0]: try: retry_unavailable_iam_on_put_delivery( client.put_delivery_channel, )( DeliveryChannel=params, ) - result['changed'] = True - result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["changed"] = True + result["channel"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result - except is_boto3_error_code('InvalidS3KeyPrefixException') as e: + except is_boto3_error_code("InvalidS3KeyPrefixException") as e: module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix") - except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. " - "Make sure the bucket exists and is available") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="The `s3_prefix` or `s3_bucket` parameter is invalid. " + "Make sure the bucket exists and is available", + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") def delete_resource(client, module, params, result): try: - response = client.delete_delivery_channel( - DeliveryChannelName=params['name'] - ) - result['changed'] = True + response = client.delete_delivery_channel(DeliveryChannelName=params["name"]) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel") @@ -155,62 +173,58 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 's3_bucket': dict(type='str', required=True), - 's3_prefix': dict(type='str'), - 'sns_topic_arn': dict(type='str'), - 'delivery_frequency': dict( - type='str', + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "s3_bucket": dict(type="str", required=True), + "s3_prefix": dict(type="str"), + "sns_topic_arn": dict(type="str"), + "delivery_frequency": dict( + type="str", choices=[ - 'One_Hour', - 'Three_Hours', - 'Six_Hours', - 'Twelve_Hours', - 'TwentyFour_Hours' - ] + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours", + ], ), }, supports_check_mode=False, ) - result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") params = {} if name: - params['name'] = name - if module.params.get('s3_bucket'): - params['s3BucketName'] = module.params.get('s3_bucket') - if module.params.get('s3_prefix'): - params['s3KeyPrefix'] = module.params.get('s3_prefix') - if module.params.get('sns_topic_arn'): - params['snsTopicARN'] = module.params.get('sns_topic_arn') - if module.params.get('delivery_frequency'): - params['configSnapshotDeliveryProperties'] = { - 'deliveryFrequency': module.params.get('delivery_frequency') - } - - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + params["name"] = name + if module.params.get("s3_bucket"): + params["s3BucketName"] = module.params.get("s3_bucket") + if module.params.get("s3_prefix"): + params["s3KeyPrefix"] = module.params.get("s3_prefix") + if module.params.get("sns_topic_arn"): + params["snsTopicARN"] = module.params.get("sns_topic_arn") + if module.params.get("delivery_frequency"): + params["configSnapshotDeliveryProperties"] = {"deliveryFrequency": module.params.get("delivery_frequency")} + + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if state == 'present': + if state == "present": if not resource_status: create_resource(client, module, params, result) if resource_status: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if resource_status: delete_resource(client, module, params, result) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/config_recorder.py b/plugins/modules/config_recorder.py index b310787b72d..2672664a5fe 100644 --- a/plugins/modules/config_recorder.py +++ b/plugins/modules/config_recorder.py @@ -93,40 +93,35 @@ def resource_exists(client, module, params): try: - recorder = client.describe_configuration_recorders( - ConfigurationRecorderNames=[params['name']] - ) - return recorder['ConfigurationRecorders'][0] - except is_boto3_error_code('NoSuchConfigurationRecorderException'): + recorder = client.describe_configuration_recorders(ConfigurationRecorderNames=[params["name"]]) + return recorder["ConfigurationRecorders"][0] + except is_boto3_error_code("NoSuchConfigurationRecorderException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def create_resource(client, module, params, result): try: - response = client.put_configuration_recorder( - ConfigurationRecorder=params - ) - result['changed'] = True - result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + response = client.put_configuration_recorder(ConfigurationRecorder=params) + result["changed"] = True + result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder") def update_resource(client, module, params, result): - current_params = client.describe_configuration_recorders( - ConfigurationRecorderNames=[params['name']] - ) + current_params = client.describe_configuration_recorders(ConfigurationRecorderNames=[params["name"]]) - if params != current_params['ConfigurationRecorders'][0]: + if params != current_params["ConfigurationRecorders"][0]: try: - response = client.put_configuration_recorder( - ConfigurationRecorder=params - ) - result['changed'] = True - result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + response = client.put_configuration_recorder(ConfigurationRecorder=params) + result["changed"] = True + result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder") @@ -134,77 +129,68 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: - response = client.delete_configuration_recorder( - ConfigurationRecorderName=params['name'] - ) - result['changed'] = True + response = client.delete_configuration_recorder(ConfigurationRecorderName=params["name"]) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder") def main(): - module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'role_arn': dict(type='str'), - 'recording_group': dict(type='dict'), + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "role_arn": dict(type="str"), + "recording_group": dict(type="dict"), }, supports_check_mode=False, required_if=[ - ('state', 'present', ['role_arn', 'recording_group']), + ("state", "present", ["role_arn", "recording_group"]), ], ) - result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") params = {} if name: - params['name'] = name - if module.params.get('role_arn'): - params['roleARN'] = module.params.get('role_arn') - if module.params.get('recording_group'): - params['recordingGroup'] = {} - if module.params.get('recording_group').get('all_supported') is not None: - params['recordingGroup'].update({ - 'allSupported': module.params.get('recording_group').get('all_supported') - }) - if module.params.get('recording_group').get('include_global_types') is not None: - params['recordingGroup'].update({ - 'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types') - }) - if module.params.get('recording_group').get('resource_types'): - params['recordingGroup'].update({ - 'resourceTypes': module.params.get('recording_group').get('resource_types') - }) + params["name"] = name + if module.params.get("role_arn"): + params["roleARN"] = module.params.get("role_arn") + if module.params.get("recording_group"): + params["recordingGroup"] = {} + if module.params.get("recording_group").get("all_supported") is not None: + params["recordingGroup"].update({"allSupported": module.params.get("recording_group").get("all_supported")}) + if module.params.get("recording_group").get("include_global_types") is not None: + params["recordingGroup"].update( + {"includeGlobalResourceTypes": module.params.get("recording_group").get("include_global_types")} + ) + if module.params.get("recording_group").get("resource_types"): + params["recordingGroup"].update( + {"resourceTypes": module.params.get("recording_group").get("resource_types")} + ) else: - params['recordingGroup'].update({ - 'resourceTypes': [] - }) + params["recordingGroup"].update({"resourceTypes": []}) - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if state == 'present': + if state == "present": if not resource_status: create_resource(client, module, params, result) if resource_status: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if resource_status: delete_resource(client, module, params, result) - module.exit_json(changed=result['changed']) + module.exit_json(changed=result["changed"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/config_rule.py b/plugins/modules/config_rule.py index cae18b2a0a4..3b49c17465e 100644 --- a/plugins/modules/config_rule.py +++ b/plugins/modules/config_rule.py @@ -122,22 +122,23 @@ def rule_exists(client, module, params): try: rule = client.describe_config_rules( - ConfigRuleNames=[params['ConfigRuleName']], + ConfigRuleNames=[params["ConfigRuleName"]], aws_retry=True, ) - return rule['ConfigRules'][0] - except is_boto3_error_code('NoSuchConfigRuleException'): + return rule["ConfigRules"][0] + except is_boto3_error_code("NoSuchConfigRuleException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def create_resource(client, module, params, result): try: - client.put_config_rule( - ConfigRule=params - ) - result['changed'] = True + client.put_config_rule(ConfigRule=params) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config rule") @@ -145,21 +146,19 @@ def create_resource(client, module, params, result): def update_resource(client, module, params, result): current_params = client.describe_config_rules( - ConfigRuleNames=[params['ConfigRuleName']], + ConfigRuleNames=[params["ConfigRuleName"]], aws_retry=True, ) - del current_params['ConfigRules'][0]['ConfigRuleArn'] - del current_params['ConfigRules'][0]['ConfigRuleId'] - del current_params['ConfigRules'][0]['EvaluationModes'] + del current_params["ConfigRules"][0]["ConfigRuleArn"] + del current_params["ConfigRules"][0]["ConfigRuleId"] + del current_params["ConfigRules"][0]["EvaluationModes"] - if params != current_params['ConfigRules'][0]: + if params != current_params["ConfigRules"][0]: try: - client.put_config_rule( - ConfigRule=params - ) - result['changed'] = True - result['rule'] = camel_dict_to_snake_dict(rule_exists(client, module, params)) + client.put_config_rule(ConfigRule=params) + result["changed"] = True + result["rule"] = camel_dict_to_snake_dict(rule_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config rule") @@ -168,11 +167,11 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: response = client.delete_config_rule( - ConfigRuleName=params['ConfigRuleName'], + ConfigRuleName=params["ConfigRuleName"], aws_retry=True, ) - result['changed'] = True - result['rule'] = {} + result["changed"] = True + result["rule"] = {} return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config rule") @@ -181,93 +180,105 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'description': dict(type='str'), - 'scope': dict(type='dict'), - 'source': dict(type='dict', required=True), - 'input_parameters': dict(type='str'), - 'execution_frequency': dict( - type='str', + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "description": dict(type="str"), + "scope": dict(type="dict"), + "source": dict(type="dict", required=True), + "input_parameters": dict(type="str"), + "execution_frequency": dict( + type="str", choices=[ - 'One_Hour', - 'Three_Hours', - 'Six_Hours', - 'Twelve_Hours', - 'TwentyFour_Hours' - ] + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours", + ], ), }, supports_check_mode=False, ) - result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - resource_type = module.params.get('resource_type') - state = module.params.get('state') + name = module.params.get("name") + resource_type = module.params.get("resource_type") + state = module.params.get("state") params = {} if name: - params['ConfigRuleName'] = name - if module.params.get('description'): - params['Description'] = module.params.get('description') - if module.params.get('scope'): - params['Scope'] = {} - if module.params.get('scope').get('compliance_types'): - params['Scope'].update({ - 'ComplianceResourceTypes': module.params.get('scope').get('compliance_types') - }) - if module.params.get('scope').get('tag_key'): - params['Scope'].update({ - 'TagKey': module.params.get('scope').get('tag_key') - }) - if module.params.get('scope').get('tag_value'): - params['Scope'].update({ - 'TagValue': module.params.get('scope').get('tag_value') - }) - if module.params.get('scope').get('compliance_id'): - params['Scope'].update({ - 'ComplianceResourceId': module.params.get('scope').get('compliance_id') - }) - if module.params.get('source'): - params['Source'] = {} - if module.params.get('source').get('owner'): - params['Source'].update({ - 'Owner': module.params.get('source').get('owner') - }) - if module.params.get('source').get('identifier'): - params['Source'].update({ - 'SourceIdentifier': module.params.get('source').get('identifier') - }) - if module.params.get('source').get('details'): - params['Source'].update({ - 'SourceDetails': module.params.get('source').get('details') - }) - if module.params.get('input_parameters'): - params['InputParameters'] = module.params.get('input_parameters') - if module.params.get('execution_frequency'): - params['MaximumExecutionFrequency'] = module.params.get('execution_frequency') - params['ConfigRuleState'] = 'ACTIVE' - - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + params["ConfigRuleName"] = name + if module.params.get("description"): + params["Description"] = module.params.get("description") + if module.params.get("scope"): + params["Scope"] = {} + if module.params.get("scope").get("compliance_types"): + params["Scope"].update( + { + "ComplianceResourceTypes": module.params.get("scope").get("compliance_types"), + } + ) + if module.params.get("scope").get("tag_key"): + params["Scope"].update( + { + "TagKey": module.params.get("scope").get("tag_key"), + } + ) + if module.params.get("scope").get("tag_value"): + params["Scope"].update( + { + "TagValue": module.params.get("scope").get("tag_value"), + } + ) + if module.params.get("scope").get("compliance_id"): + params["Scope"].update( + { + "ComplianceResourceId": module.params.get("scope").get("compliance_id"), + } + ) + if module.params.get("source"): + params["Source"] = {} + if module.params.get("source").get("owner"): + params["Source"].update( + { + "Owner": module.params.get("source").get("owner"), + } + ) + if module.params.get("source").get("identifier"): + params["Source"].update( + { + "SourceIdentifier": module.params.get("source").get("identifier"), + } + ) + if module.params.get("source").get("details"): + params["Source"].update( + { + "SourceDetails": module.params.get("source").get("details"), + } + ) + if module.params.get("input_parameters"): + params["InputParameters"] = module.params.get("input_parameters") + if module.params.get("execution_frequency"): + params["MaximumExecutionFrequency"] = module.params.get("execution_frequency") + params["ConfigRuleState"] = "ACTIVE" + + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) existing_rule = rule_exists(client, module, params) - if state == 'present': + if state == "present": if not existing_rule: create_resource(client, module, params, result) else: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if existing_rule: delete_resource(client, module, params, result) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/data_pipeline.py b/plugins/modules/data_pipeline.py index 51068159507..d30be5c847d 100644 --- a/plugins/modules/data_pipeline.py +++ b/plugins/modules/data_pipeline.py @@ -211,11 +211,11 @@ from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED'] -DP_INACTIVE_STATES = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING'] -DP_ACTIVATING_STATE = 'ACTIVATING' -DP_DEACTIVATING_STATE = 'DEACTIVATING' -PIPELINE_DOESNT_EXIST = '^.*Pipeline with id: {0} does not exist$' +DP_ACTIVE_STATES = ["ACTIVE", "SCHEDULED"] +DP_INACTIVE_STATES = ["INACTIVE", "PENDING", "FINISHED", "DELETING"] +DP_ACTIVATING_STATE = "ACTIVATING" +DP_DEACTIVATING_STATE = "DEACTIVATING" +PIPELINE_DOESNT_EXIST = "^.*Pipeline with id: {0} does not exist$" class DataPipelineNotFound(Exception): @@ -236,9 +236,9 @@ def pipeline_id(client, name): """ pipelines = client.list_pipelines() - for dp in pipelines['pipelineIdList']: - if dp['name'] == name: - return dp['id'] + for dp in pipelines["pipelineIdList"]: + if dp["name"] == name: + return dp["id"] raise DataPipelineNotFound @@ -252,7 +252,7 @@ def pipeline_description(client, dp_id): """ try: return client.describe_pipelines(pipelineIds=[dp_id]) - except is_boto3_error_code(['PipelineNotFoundException', 'PipelineDeletedException']): + except is_boto3_error_code(["PipelineNotFoundException", "PipelineDeletedException"]): raise DataPipelineNotFound @@ -268,9 +268,9 @@ def pipeline_field(client, dp_id, field): """ dp_description = pipeline_description(client, dp_id) - for field_key in dp_description['pipelineDescriptionList'][0]['fields']: - if field_key['key'] == field: - return field_key['stringValue'] + for field_key in dp_description["pipelineDescriptionList"][0]["fields"]: + if field_key["key"] == field: + return field_key["stringValue"] raise KeyError("Field key {0} not found!".format(field)) @@ -343,70 +343,70 @@ def pipeline_exists_timeout(client, dp_id, timeout): def activate_pipeline(client, module): - """Activates pipeline - - """ - dp_name = module.params.get('name') - timeout = module.params.get('timeout') + """Activates pipeline""" + dp_name = module.params.get("name") + timeout = module.params.get("timeout") try: dp_id = pipeline_id(client, dp_name) except DataPipelineNotFound: - module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name)) + module.fail_json(msg="Data Pipeline {0} not found".format(dp_name)) if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES: changed = False else: try: client.activate_pipeline(pipelineId=dp_id) - except is_boto3_error_code('InvalidRequestException'): + except is_boto3_error_code("InvalidRequestException"): module.fail_json(msg="You need to populate your pipeline before activation.") try: - pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES, - timeout=timeout) + pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES, timeout=timeout) except TimeOutException: if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED": # activated but completed more rapidly than it was checked pass else: - module.fail_json(msg=('Data Pipeline {0} failed to activate ' - 'within timeout {1} seconds').format(dp_name, timeout)) + module.fail_json( + msg=f"Data Pipeline {dp_name} failed to activate within timeout {timeout} seconds", + ) changed = True data_pipeline = get_result(client, dp_id) - result = {'data_pipeline': data_pipeline, - 'msg': 'Data Pipeline {0} activated.'.format(dp_name)} + result = { + "data_pipeline": data_pipeline, + "msg": f"Data Pipeline {dp_name} activated.", + } return (changed, result) def deactivate_pipeline(client, module): - """Deactivates pipeline - - """ - dp_name = module.params.get('name') - timeout = module.params.get('timeout') + """Deactivates pipeline""" + dp_name = module.params.get("name") + timeout = module.params.get("timeout") try: dp_id = pipeline_id(client, dp_name) except DataPipelineNotFound: - module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name)) + module.fail_json(msg="Data Pipeline {0} not found".format(dp_name)) if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES: changed = False else: client.deactivate_pipeline(pipelineId=dp_id) try: - pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES, - timeout=timeout) + pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES, timeout=timeout) except TimeOutException: - module.fail_json(msg=('Data Pipeline {0} failed to deactivate' - 'within timeout {1} seconds').format(dp_name, timeout)) + module.fail_json( + msg=f"Data Pipeline {dp_name} failed to deactivate within timeout {timeout} seconds", + ) changed = True data_pipeline = get_result(client, dp_id) - result = {'data_pipeline': data_pipeline, - 'msg': 'Data Pipeline {0} deactivated.'.format(dp_name)} + result = { + "data_pipeline": data_pipeline, + "msg": f"Data Pipeline {dp_name} deactivated.", + } return (changed, result) @@ -420,11 +420,9 @@ def _delete_dp_with_check(dp_id, client, timeout): def delete_pipeline(client, module): - """Deletes pipeline - - """ - dp_name = module.params.get('name') - timeout = module.params.get('timeout') + """Deletes pipeline""" + dp_name = module.params.get("name") + timeout = module.params.get("timeout") try: dp_id = pipeline_id(client, dp_name) @@ -433,10 +431,13 @@ def delete_pipeline(client, module): except DataPipelineNotFound: changed = False except TimeOutException: - module.fail_json(msg=('Data Pipeline {0} failed to delete' - 'within timeout {1} seconds').format(dp_name, timeout)) - result = {'data_pipeline': {}, - 'msg': 'Data Pipeline {0} deleted'.format(dp_name)} + module.fail_json( + msg=f"Data Pipeline {dp_name} failed to delete within timeout {timeout} seconds", + ) + result = { + "data_pipeline": {}, + "msg": f"Data Pipeline {dp_name} deleted", + } return (changed, result) @@ -444,14 +445,14 @@ def delete_pipeline(client, module): def build_unique_id(module): data = dict(module.params) # removing objects from the unique id so we can update objects or populate the pipeline after creation without needing to make a new pipeline - [data.pop(each, None) for each in ('objects', 'timeout')] + [data.pop(each, None) for each in ("objects", "timeout")] json_data = json.dumps(data, sort_keys=True).encode("utf-8") hashed_data = hashlib.md5(json_data).hexdigest() return hashed_data def format_tags(tags): - """ Reformats tags + """Reformats tags :param dict tags: dict of data pipeline tags (e.g. {key1: val1, key2: val2, key3: val3}) :returns: list of dicts (e.g. [{key: key1, value: val1}, {key: key2, value: val2}, {key: key3, value: val3}]) @@ -461,16 +462,16 @@ def format_tags(tags): def get_result(client, dp_id): - """ Get the current state of the data pipeline and reformat it to snake_case for exit_json + """Get the current state of the data pipeline and reformat it to snake_case for exit_json :param object client: boto3 datapipeline client :param string dp_id: pipeline id :returns: reformatted dict of pipeline description - """ + """ # pipeline_description returns a pipelineDescriptionList of length 1 # dp is a dict with keys "description" (str), "fields" (list), "name" (str), "pipelineId" (str), "tags" (dict) - dp = pipeline_description(client, dp_id)['pipelineDescriptionList'][0] + dp = pipeline_description(client, dp_id)["pipelineDescriptionList"][0] # Get uniqueId and pipelineState in fields to add to the exit_json result dp["unique_id"] = pipeline_field(client, dp_id, field="uniqueId") @@ -487,8 +488,7 @@ def get_result(client, dp_id): def diff_pipeline(client, module, objects, unique_id, dp_name): - """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated - """ + """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated""" result = {} changed = False create_dp = False @@ -504,16 +504,18 @@ def diff_pipeline(client, module, objects, unique_id, dp_name): create_dp = True # Unique ids are the same - check if pipeline needs modification else: - dp_objects = client.get_pipeline_definition(pipelineId=dp_id)['pipelineObjects'] + dp_objects = client.get_pipeline_definition(pipelineId=dp_id)["pipelineObjects"] # Definition needs to be updated if dp_objects != objects: changed, msg = define_pipeline(client, module, objects, dp_id) # No changes else: - msg = 'Data Pipeline {0} is present'.format(dp_name) + msg = f"Data Pipeline {dp_name} is present" data_pipeline = get_result(client, dp_id) - result = {'data_pipeline': data_pipeline, - 'msg': msg} + result = { + "data_pipeline": data_pipeline, + "msg": msg, + } except DataPipelineNotFound: create_dp = True @@ -521,30 +523,30 @@ def diff_pipeline(client, module, objects, unique_id, dp_name): def define_pipeline(client, module, objects, dp_id): - """Puts pipeline definition - - """ - dp_name = module.params.get('name') + """Puts pipeline definition""" + dp_name = module.params.get("name") if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED": - msg = 'Data Pipeline {0} is unable to be updated while in state FINISHED.'.format(dp_name) + msg = "Data Pipeline {0} is unable to be updated while in state FINISHED.".format(dp_name) changed = False elif objects: - parameters = module.params.get('parameters') - values = module.params.get('values') + parameters = module.params.get("parameters") + values = module.params.get("values") try: - client.put_pipeline_definition(pipelineId=dp_id, - pipelineObjects=objects, - parameterObjects=parameters, - parameterValues=values) - msg = 'Data Pipeline {0} has been updated.'.format(dp_name) + client.put_pipeline_definition( + pipelineId=dp_id, pipelineObjects=objects, parameterObjects=parameters, parameterValues=values + ) + msg = "Data Pipeline {0} has been updated.".format(dp_name) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to put the definition for pipeline {0}. Check that string/reference fields" - "are not empty and that the number of objects in the pipeline does not exceed maximum allowed" - "objects".format(dp_name)) + module.fail_json_aws( + e, + msg=f"Failed to put the definition for pipeline {dp_name}. Check that string/reference fields" + "are not empty and that the number of objects in the pipeline does not exceed maximum allowed" + "objects", + ) else: changed = False msg = "" @@ -553,14 +555,12 @@ def define_pipeline(client, module, objects, dp_id): def create_pipeline(client, module): - """Creates datapipeline. Uses uniqueId to achieve idempotency. - - """ - dp_name = module.params.get('name') - objects = module.params.get('objects', None) - description = module.params.get('description', '') - tags = module.params.get('tags') - timeout = module.params.get('timeout') + """Creates datapipeline. Uses uniqueId to achieve idempotency.""" + dp_name = module.params.get("name") + objects = module.params.get("objects", None) + description = module.params.get("description", "") + tags = module.params.get("tags") + timeout = module.params.get("timeout") unique_id = build_unique_id(module) create_dp, changed, result = diff_pipeline(client, module, objects, unique_id, dp_name) @@ -574,24 +574,27 @@ def create_pipeline(client, module): # Make pipeline try: tags = format_tags(tags) - dp = client.create_pipeline(name=dp_name, - uniqueId=unique_id, - description=description, - tags=tags) - dp_id = dp['pipelineId'] + dp = client.create_pipeline(name=dp_name, uniqueId=unique_id, description=description, tags=tags) + dp_id = dp["pipelineId"] pipeline_exists_timeout(client, dp_id, timeout) except TimeOutException: - module.fail_json(msg=('Data Pipeline {0} failed to create' - 'within timeout {1} seconds').format(dp_name, timeout)) + module.fail_json( + msg=f"Data Pipeline {dp_name} failed to create within timeout {timeout} seconds", + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create the data pipeline {0}.".format(dp_name)) + module.fail_json_aws( + e, + msg=f"Failed to create the data pipeline {dp_name}.", + ) # Put pipeline definition changed, msg = define_pipeline(client, module, objects, dp_id) changed = True data_pipeline = get_result(client, dp_id) - result = {'data_pipeline': data_pipeline, - 'msg': 'Data Pipeline {0} created.'.format(dp_name) + msg} + result = { + "data_pipeline": data_pipeline, + "msg": f"Data Pipeline {dp_name} created." + msg, + } return (changed, result) @@ -599,34 +602,33 @@ def create_pipeline(client, module): def main(): argument_spec = dict( name=dict(required=True), - description=dict(required=False, default=''), - objects=dict(required=False, type='list', default=[], elements='dict'), - parameters=dict(required=False, type='list', default=[], elements='dict'), - timeout=dict(required=False, type='int', default=300), - state=dict(default='present', choices=['present', 'absent', - 'active', 'inactive']), - tags=dict(required=False, type='dict', default={}, aliases=['resource_tags']), - values=dict(required=False, type='list', default=[], elements='dict'), + description=dict(required=False, default=""), + objects=dict(required=False, type="list", default=[], elements="dict"), + parameters=dict(required=False, type="list", default=[], elements="dict"), + timeout=dict(required=False, type="int", default=300), + state=dict(default="present", choices=["present", "absent", "active", "inactive"]), + tags=dict(required=False, type="dict", default={}, aliases=["resource_tags"]), + values=dict(required=False, type="list", default=[], elements="dict"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) try: - client = module.client('datapipeline') + client = module.client("datapipeline") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - state = module.params.get('state') - if state == 'present': + state = module.params.get("state") + if state == "present": changed, result = create_pipeline(client, module) - elif state == 'absent': + elif state == "absent": changed, result = delete_pipeline(client, module) - elif state == 'active': + elif state == "active": changed, result = activate_pipeline(client, module) - elif state == 'inactive': + elif state == "inactive": changed, result = deactivate_pipeline(client, module) module.exit_json(result=result, changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/directconnect_confirm_connection.py b/plugins/modules/directconnect_confirm_connection.py index 25aeebb244c..e8e0f2c6b08 100644 --- a/plugins/modules/directconnect_confirm_connection.py +++ b/plugins/modules/directconnect_confirm_connection.py @@ -82,7 +82,7 @@ def describe_connections(client, params): def find_connection_id(client, connection_id=None, connection_name=None): params = {} if connection_id: - params['connectionId'] = connection_id + params["connectionId"] = connection_id try: response = describe_connections(client, params) except (BotoCoreError, ClientError) as e: @@ -90,18 +90,20 @@ def find_connection_id(client, connection_id=None, connection_name=None): msg = "Failed to describe DirectConnect ID {0}".format(connection_id) else: msg = "Failed to describe DirectConnect connections" - raise DirectConnectError(msg=msg, - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=msg, + last_traceback=traceback.format_exc(), + exception=e, + ) match = [] - if len(response.get('connections', [])) == 1 and connection_id: - if response['connections'][0]['connectionState'] != 'deleted': - match.append(response['connections'][0]['connectionId']) + if len(response.get("connections", [])) == 1 and connection_id: + if response["connections"][0]["connectionState"] != "deleted": + match.append(response["connections"][0]["connectionId"]) - for conn in response.get('connections', []): - if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted': - match.append(conn['connectionId']) + for conn in response.get("connections", []): + if connection_name == conn["connectionName"] and conn["connectionState"] != "deleted": + match.append(conn["connectionId"]) if len(match) == 1: return match[0] @@ -112,34 +114,33 @@ def find_connection_id(client, connection_id=None, connection_name=None): def get_connection_state(client, connection_id): try: response = describe_connections(client, dict(connectionId=connection_id)) - return response['connections'][0]['connectionState'] + return response["connections"][0]["connectionState"] except (BotoCoreError, ClientError, IndexError) as e: - raise DirectConnectError(msg="Failed to describe DirectConnect connection {0} state".format(connection_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg="Failed to describe DirectConnect connection {0} state".format(connection_id), + last_traceback=traceback.format_exc(), + exception=e, + ) def main(): - argument_spec = dict( - connection_id=dict(), - name=dict() + argument_spec = dict(connection_id=dict(), name=dict()) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[["connection_id", "name"]], + required_one_of=[["connection_id", "name"]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['connection_id', 'name']], - required_one_of=[['connection_id', 'name']]) - client = module.client('directconnect') + client = module.client("directconnect") - connection_id = module.params['connection_id'] - connection_name = module.params['name'] + connection_id = module.params["connection_id"] + connection_name = module.params["name"] changed = False connection_state = None try: - connection_id = find_connection_id(client, - connection_id, - connection_name) + connection_id = find_connection_id(client, connection_id, connection_name) connection_state = get_connection_state(client, connection_id) - if connection_state == 'ordering': + if connection_state == "ordering": client.confirm_connection(connectionId=connection_id) changed = True connection_state = get_connection_state(client, connection_id) @@ -152,5 +153,5 @@ def main(): module.exit_json(changed=changed, connection_state=connection_state) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/directconnect_connection.py b/plugins/modules/directconnect_connection.py index 709fef7a79f..176d83392d4 100644 --- a/plugins/modules/directconnect_connection.py +++ b/plugins/modules/directconnect_connection.py @@ -182,7 +182,7 @@ def connection_status(client, connection_id): def connection_exists(client, connection_id=None, connection_name=None, verify=True): params = {} if connection_id: - params['connectionId'] = connection_id + params["connectionId"] = connection_id try: response = AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params) except (BotoCoreError, ClientError) as e: @@ -190,23 +190,21 @@ def connection_exists(client, connection_id=None, connection_name=None, verify=T msg = "Failed to describe DirectConnect ID {0}".format(connection_id) else: msg = "Failed to describe DirectConnect connections" - raise DirectConnectError(msg=msg, - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) match = [] connection = [] # look for matching connections - if len(response.get('connections', [])) == 1 and connection_id: - if response['connections'][0]['connectionState'] != 'deleted': - match.append(response['connections'][0]['connectionId']) - connection.extend(response['connections']) + if len(response.get("connections", [])) == 1 and connection_id: + if response["connections"][0]["connectionState"] != "deleted": + match.append(response["connections"][0]["connectionId"]) + connection.extend(response["connections"]) - for conn in response.get('connections', []): - if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted': - match.append(conn['connectionId']) + for conn in response.get("connections", []): + if connection_name == conn["connectionName"] and conn["connectionState"] != "deleted": + match.append(conn["connectionId"]) connection.append(conn) # verifying if the connections exists; if true, return connection identifier, otherwise return False @@ -216,33 +214,35 @@ def connection_exists(client, connection_id=None, connection_name=None, verify=T return False # not verifying if the connection exists; just return current connection info elif len(connection) == 1: - return {'connection': connection[0]} - return {'connection': {}} + return {"connection": connection[0]} + return {"connection": {}} def create_connection(client, location, bandwidth, name, lag_id): if not name: raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.") params = { - 'location': location, - 'bandwidth': bandwidth, - 'connectionName': name, + "location": location, + "bandwidth": bandwidth, + "connectionName": name, } if lag_id: - params['lagId'] = lag_id + params["lagId"] = lag_id try: connection = AWSRetry.jittered_backoff(**retry_params)(client.create_connection)(**params) except (BotoCoreError, ClientError) as e: - raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name), - last_traceback=traceback.format_exc(), - exception=e) - return connection['connectionId'] + raise DirectConnectError( + msg="Failed to create DirectConnect connection {0}".format(name), + last_traceback=traceback.format_exc(), + exception=e, + ) + return connection["connectionId"] def changed_properties(current_status, location, bandwidth): - current_bandwidth = current_status['bandwidth'] - current_location = current_status['location'] + current_bandwidth = current_status["bandwidth"] + current_location = current_status["location"] return current_bandwidth != bandwidth or current_location != location @@ -250,10 +250,10 @@ def changed_properties(current_status, location, bandwidth): @AWSRetry.jittered_backoff(**retry_params) def update_associations(client, latest_state, connection_id, lag_id): changed = False - if 'lagId' in latest_state and lag_id != latest_state['lagId']: - disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId']) + if "lagId" in latest_state and lag_id != latest_state["lagId"]: + disassociate_connection_and_lag(client, connection_id, lag_id=latest_state["lagId"]) changed = True - if (changed and lag_id) or (lag_id and 'lagId' not in latest_state): + if (changed and lag_id) or (lag_id and "lagId" not in latest_state): associate_connection_and_lag(client, connection_id, lag_id) changed = True return changed @@ -262,16 +262,18 @@ def update_associations(client, latest_state, connection_id, lag_id): def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update): # the connection is found; get the latest state and see if it needs to be updated if connection_id: - latest_state = connection_status(client, connection_id=connection_id)['connection'] + latest_state = connection_status(client, connection_id=connection_id)["connection"] if changed_properties(latest_state, location, bandwidth) and forced_update: ensure_absent(client, connection_id) - return ensure_present(client=client, - connection_id=None, - connection_name=connection_name, - location=location, - bandwidth=bandwidth, - lag_id=lag_id, - forced_update=forced_update) + return ensure_present( + client=client, + connection_id=None, + connection_name=connection_name, + location=location, + bandwidth=bandwidth, + lag_id=lag_id, + forced_update=forced_update, + ) elif update_associations(client, latest_state, connection_id, lag_id): return True, connection_id @@ -294,53 +296,59 @@ def ensure_absent(client, connection_id): def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(), location=dict(), - bandwidth=dict(choices=['1Gbps', '10Gbps']), + bandwidth=dict(choices=["1Gbps", "10Gbps"]), link_aggregation_group=dict(), connection_id=dict(), - forced_update=dict(type='bool', default=False) + forced_update=dict(type="bool", default=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_one_of=[('connection_id', 'name')], - required_if=[('state', 'present', ('location', 'bandwidth'))] + required_one_of=[("connection_id", "name")], + required_if=[("state", "present", ("location", "bandwidth"))], ) - connection = module.client('directconnect') + connection = module.client("directconnect") - state = module.params.get('state') + state = module.params.get("state") try: connection_id = connection_exists( - connection, - connection_id=module.params.get('connection_id'), - connection_name=module.params.get('name') + connection, connection_id=module.params.get("connection_id"), connection_name=module.params.get("name") ) - if not connection_id and module.params.get('connection_id'): - module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id'))) - - if state == 'present': - changed, connection_id = ensure_present(connection, - connection_id=connection_id, - connection_name=module.params.get('name'), - location=module.params.get('location'), - bandwidth=module.params.get('bandwidth'), - lag_id=module.params.get('link_aggregation_group'), - forced_update=module.params.get('forced_update')) + if not connection_id and module.params.get("connection_id"): + module.fail_json( + msg=f"The Direct Connect connection {module.params['connection_id']} does not exist.", + ) + + if state == "present": + changed, connection_id = ensure_present( + connection, + connection_id=connection_id, + connection_name=module.params.get("name"), + location=module.params.get("location"), + bandwidth=module.params.get("bandwidth"), + lag_id=module.params.get("link_aggregation_group"), + forced_update=module.params.get("forced_update"), + ) response = connection_status(connection, connection_id) - elif state == 'absent': + elif state == "absent": changed = ensure_absent(connection, connection_id) response = {} except DirectConnectError as e: if e.last_traceback: - module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response)) + module.fail_json( + msg=e.msg, + exception=e.last_traceback, + **camel_dict_to_snake_dict(e.exception.response), + ) else: module.fail_json(msg=e.msg) module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/directconnect_gateway.py b/plugins/modules/directconnect_gateway.py index 53fb47c9603..b231f0e8f44 100644 --- a/plugins/modules/directconnect_gateway.py +++ b/plugins/modules/directconnect_gateway.py @@ -110,11 +110,12 @@ def dx_gateway_info(client, gateway_id, module): try: resp = client.describe_direct_connect_gateways( - directConnectGatewayId=gateway_id) + directConnectGatewayId=gateway_id, + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to fetch gateway information.") - if resp['directConnectGateways']: - return resp['directConnectGateways'][0] + if resp["directConnectGateways"]: + return resp["directConnectGateways"][0] def wait_for_status(client, module, gateway_id, virtual_gateway_id, status): @@ -128,9 +129,10 @@ def wait_for_status(client, module, gateway_id, virtual_gateway_id, status): client, module, gateway_id=gateway_id, - virtual_gateway_id=virtual_gateway_id) - if response['directConnectGatewayAssociations']: - if response['directConnectGatewayAssociations'][0]['associationState'] == status: + virtual_gateway_id=virtual_gateway_id, + ) + if response["directConnectGatewayAssociations"]: + if response["directConnectGatewayAssociations"][0]["associationState"] == status: status_achieved = True break else: @@ -147,17 +149,18 @@ def wait_for_status(client, module, gateway_id, virtual_gateway_id, status): def associate_direct_connect_gateway(client, module, gateway_id): params = dict() - params['virtual_gateway_id'] = module.params.get('virtual_gateway_id') + params["virtual_gateway_id"] = module.params.get("virtual_gateway_id") try: response = client.create_direct_connect_gateway_association( directConnectGatewayId=gateway_id, - virtualGatewayId=params['virtual_gateway_id']) + virtualGatewayId=params["virtual_gateway_id"], + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to associate gateway') + module.fail_json_aws(e, "Failed to associate gateway") - status_achieved, dxgw = wait_for_status(client, module, gateway_id, params['virtual_gateway_id'], 'associating') + status_achieved, dxgw = wait_for_status(client, module, gateway_id, params["virtual_gateway_id"], "associating") if not status_achieved: - module.fail_json(msg='Error waiting for dxgw to attach to vpg - please check the AWS console') + module.fail_json(msg="Error waiting for dxgw to attach to vpg - please check the AWS console") result = response return result @@ -167,13 +170,14 @@ def delete_association(client, module, gateway_id, virtual_gateway_id): try: response = client.delete_direct_connect_gateway_association( directConnectGatewayId=gateway_id, - virtualGatewayId=virtual_gateway_id) + virtualGatewayId=virtual_gateway_id, + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to delete gateway association.") - status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, 'disassociating') + status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, "disassociating") if not status_achieved: - module.fail_json(msg='Error waiting for dxgw to detach from vpg - please check the AWS console') + module.fail_json(msg="Error waiting for dxgw to detach from vpg - please check the AWS console") result = response return result @@ -181,12 +185,13 @@ def delete_association(client, module, gateway_id, virtual_gateway_id): def create_dx_gateway(client, module): params = dict() - params['name'] = module.params.get('name') - params['amazon_asn'] = module.params.get('amazon_asn') + params["name"] = module.params.get("name") + params["amazon_asn"] = module.params.get("amazon_asn") try: response = client.create_direct_connect_gateway( - directConnectGatewayName=params['name'], - amazonSideAsn=int(params['amazon_asn'])) + directConnectGatewayName=params["name"], + amazonSideAsn=int(params["amazon_asn"]), + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to create direct connect gateway.") @@ -198,21 +203,21 @@ def find_dx_gateway(client, module, gateway_id=None): params = dict() gateways = list() if gateway_id is not None: - params['directConnectGatewayId'] = gateway_id + params["directConnectGatewayId"] = gateway_id while True: try: resp = client.describe_direct_connect_gateways(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to describe gateways") - gateways.extend(resp['directConnectGateways']) - if 'nextToken' in resp: - params['nextToken'] = resp['nextToken'] + gateways.extend(resp["directConnectGateways"]) + if "nextToken" in resp: + params["nextToken"] = resp["nextToken"] else: break if gateways != []: count = 0 for gateway in gateways: - if module.params.get('name') == gateway['directConnectGatewayName']: + if module.params.get("name") == gateway["directConnectGatewayName"]: count += 1 return gateway return None @@ -222,7 +227,7 @@ def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None): try: if virtual_gateway_id is None: resp = client.describe_direct_connect_gateway_associations( - directConnectGatewayId=gateway_id + directConnectGatewayId=gateway_id, ) else: resp = client.describe_direct_connect_gateway_associations( @@ -241,22 +246,20 @@ def ensure_present(client, module): changed = False params = dict() result = dict() - params['name'] = module.params.get('name') - params['amazon_asn'] = module.params.get('amazon_asn') - params['virtual_gateway_id'] = module.params.get('virtual_gateway_id') + params["name"] = module.params.get("name") + params["amazon_asn"] = module.params.get("amazon_asn") + params["virtual_gateway_id"] = module.params.get("virtual_gateway_id") # check if a gateway matching our module args already exists existing_dxgw = find_dx_gateway(client, module) - if existing_dxgw is not None and existing_dxgw['directConnectGatewayState'] != 'deleted': - gateway_id = existing_dxgw['directConnectGatewayId'] + if existing_dxgw is not None and existing_dxgw["directConnectGatewayState"] != "deleted": + gateway_id = existing_dxgw["directConnectGatewayId"] # if a gateway_id was provided, check if it is attach to the DXGW - if params['virtual_gateway_id']: + if params["virtual_gateway_id"]: resp = check_dxgw_association( - client, - module, - gateway_id=gateway_id, - virtual_gateway_id=params['virtual_gateway_id']) + client, module, gateway_id=gateway_id, virtual_gateway_id=params["virtual_gateway_id"] + ) if not resp["directConnectGatewayAssociations"]: # attach the dxgw to the supplied virtual_gateway_id associate_direct_connect_gateway(client, module, gateway_id) @@ -267,26 +270,28 @@ def ensure_present(client, module): resp = check_dxgw_association(client, module, gateway_id=gateway_id) if resp["directConnectGatewayAssociations"]: - for association in resp['directConnectGatewayAssociations']: - if association['associationState'] not in ['disassociating', 'disassociated']: + for association in resp["directConnectGatewayAssociations"]: + if association["associationState"] not in ["disassociating", "disassociated"]: delete_association( client, module, gateway_id=gateway_id, - virtual_gateway_id=association['virtualGatewayId']) + virtual_gateway_id=association["virtualGatewayId"], + ) else: # create a new dxgw new_dxgw = create_dx_gateway(client, module) changed = True - gateway_id = new_dxgw['directConnectGateway']['directConnectGatewayId'] + gateway_id = new_dxgw["directConnectGateway"]["directConnectGatewayId"] # if a vpc-id was supplied, attempt to attach it to the dxgw - if params['virtual_gateway_id']: + if params["virtual_gateway_id"]: associate_direct_connect_gateway(client, module, gateway_id) - resp = check_dxgw_association(client, - module, - gateway_id=gateway_id - ) + resp = check_dxgw_association( + client, + module, + gateway_id=gateway_id, + ) if resp["directConnectGatewayAssociations"]: changed = True @@ -300,23 +305,23 @@ def ensure_absent(client, module): changed = False result = dict() - dx_gateway_id = module.params.get('direct_connect_gateway_id') + dx_gateway_id = module.params.get("direct_connect_gateway_id") existing_dxgw = find_dx_gateway(client, module, dx_gateway_id) if existing_dxgw is not None: - resp = check_dxgw_association(client, module, - gateway_id=dx_gateway_id) + resp = check_dxgw_association(client, module, gateway_id=dx_gateway_id) if resp["directConnectGatewayAssociations"]: - for association in resp['directConnectGatewayAssociations']: - if association['associationState'] not in ['disassociating', 'disassociated']: - delete_association(client, module, - gateway_id=dx_gateway_id, - virtual_gateway_id=association['virtualGatewayId']) + for association in resp["directConnectGatewayAssociations"]: + if association["associationState"] not in ["disassociating", "disassociated"]: + delete_association( + client, + module, + gateway_id=dx_gateway_id, + virtual_gateway_id=association["virtualGatewayId"], + ) # wait for deleting association - timeout = time.time() + module.params.get('wait_timeout') + timeout = time.time() + module.params.get("wait_timeout") while time.time() < timeout: - resp = check_dxgw_association(client, - module, - gateway_id=dx_gateway_id) + resp = check_dxgw_association(client, module, gateway_id=dx_gateway_id) if resp["directConnectGatewayAssociations"] != []: time.sleep(15) else: @@ -324,43 +329,44 @@ def ensure_absent(client, module): try: resp = client.delete_direct_connect_gateway( - directConnectGatewayId=dx_gateway_id + directConnectGatewayId=dx_gateway_id, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to delete gateway") - result = resp['directConnectGateway'] + result = resp["directConnectGateway"] return changed def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), name=dict(), amazon_asn=dict(), virtual_gateway_id=dict(), direct_connect_gateway_id=dict(), - wait_timeout=dict(type='int', default=320), + wait_timeout=dict(type="int", default=320), + ) + required_if = [("state", "present", ["name", "amazon_asn"]), ("state", "absent", ["direct_connect_gateway_id"])] + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=required_if, ) - required_if = [('state', 'present', ['name', 'amazon_asn']), - ('state', 'absent', ['direct_connect_gateway_id'])] - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=required_if) - state = module.params.get('state') + state = module.params.get("state") try: - client = module.client('directconnect') + client = module.client("directconnect") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': + if state == "present": (changed, results) = ensure_present(client, module) - elif state == 'absent': + elif state == "absent": changed = ensure_absent(client, module) results = {} module.exit_json(changed=changed, **camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/directconnect_link_aggregation_group.py b/plugins/modules/directconnect_link_aggregation_group.py index 8a50e3c7e89..9a532c63298 100644 --- a/plugins/modules/directconnect_link_aggregation_group.py +++ b/plugins/modules/directconnect_link_aggregation_group.py @@ -185,8 +185,8 @@ def lag_status(client, lag_id): def lag_exists(client, lag_id=None, lag_name=None, verify=True): - """ If verify=True, returns the LAG ID or None - If verify=False, returns the LAG's data (or an empty dict) + """If verify=True, returns the LAG ID or None + If verify=False, returns the LAG's data (or an empty dict) """ try: if lag_id: @@ -200,26 +200,24 @@ def lag_exists(client, lag_id=None, lag_name=None, verify=True): return {} else: failed_op = "Failed to describe DirectConnect link aggregation groups." - raise DirectConnectError(msg=failed_op, - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError(msg=failed_op, last_traceback=traceback.format_exc(), exception=e) match = [] # List of LAG IDs that are exact matches lag = [] # List of LAG data that are exact matches # look for matching connections - if len(response.get('lags', [])) == 1 and lag_id: - if response['lags'][0]['lagState'] != 'deleted': - match.append(response['lags'][0]['lagId']) - lag.append(response['lags'][0]) + if len(response.get("lags", [])) == 1 and lag_id: + if response["lags"][0]["lagState"] != "deleted": + match.append(response["lags"][0]["lagId"]) + lag.append(response["lags"][0]) else: - for each in response.get('lags', []): - if each['lagState'] != 'deleted': + for each in response.get("lags", []): + if each["lagState"] != "deleted": if not lag_id: - if lag_name == each['lagName']: - match.append(each['lagId']) + if lag_name == each["lagName"]: + match.append(each["lagId"]) else: - match.append(each['lagId']) + match.append(each["lagId"]) # verifying if the connections exists; if true, return connection identifier, otherwise return False if verify and len(match) == 1: @@ -237,36 +235,41 @@ def lag_exists(client, lag_id=None, lag_name=None, verify=True): def create_lag(client, num_connections, location, bandwidth, name, connection_id): if not name: - raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.", - last_traceback=None, - exception="") - - parameters = dict(numberOfConnections=num_connections, - location=location, - connectionsBandwidth=bandwidth, - lagName=name) + raise DirectConnectError( + msg="Failed to create a Direct Connect link aggregation group: name required.", + last_traceback=None, + exception="", + ) + + parameters = dict( + numberOfConnections=num_connections, location=location, connectionsBandwidth=bandwidth, lagName=name + ) if connection_id: parameters.update(connectionId=connection_id) try: lag = client.create_lag(**parameters) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg="Failed to create DirectConnect link aggregation group {0}".format(name), + last_traceback=traceback.format_exc(), + exception=e, + ) - return lag['lagId'] + return lag["lagId"] def delete_lag(client, lag_id): try: client.delete_lag(lagId=lag_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id), + last_traceback=traceback.format_exc(), + exception=e, + ) -@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException']) +@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=["DirectConnectClientException"]) def _update_lag(client, lag_id, lag_name, min_links): params = {} if min_links: @@ -283,9 +286,9 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_ if min_links and min_links > num_connections: raise DirectConnectError( msg="The number of connections {0} must be greater than the minimum number of links " - "{1} to update the LAG {2}".format(num_connections, min_links, lag_id), + "{1} to update the LAG {2}".format(num_connections, min_links, lag_id), last_traceback=None, - exception=None + exception=None, ) while True: @@ -295,26 +298,32 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_ if wait and time.time() - start <= wait_timeout: continue msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id) - if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']: - msg += "Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links) - raise DirectConnectError(msg=msg, - last_traceback=traceback.format_exc(), - exception=e) + if "MinimumLinks cannot be set higher than the number of connections" in e.response["Error"]["Message"]: + msg += ( + "Unable to set the min number of links to {0} while the LAG connections are being requested".format( + min_links + ) + ) + raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) else: break def lag_changed(current_status, name, min_links): - """ Determines if a modifiable link aggregation group attribute has been modified. """ - return (name and name != current_status['lagName']) or (min_links and min_links != current_status['minimumLinks']) + """Determines if a modifiable link aggregation group attribute has been modified.""" + return (name and name != current_status["lagName"]) or (min_links and min_links != current_status["minimumLinks"]) -def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout): +def ensure_present( + client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout +): exists = lag_exists(client, lag_id, lag_name) if not exists and lag_id: - raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id), - last_traceback=None, - exception="") + raise DirectConnectError( + msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id), + last_traceback=None, + exception="", + ) # the connection is found; get the latest state and see if it needs to be updated if exists: @@ -336,27 +345,31 @@ def describe_virtual_interfaces(client, lag_id): try: response = client.describe_virtual_interfaces(connectionId=lag_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id), - last_traceback=traceback.format_exc(), - exception=e) - return response.get('virtualInterfaces', []) + raise DirectConnectError( + msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id), + last_traceback=traceback.format_exc(), + exception=e, + ) + return response.get("virtualInterfaces", []) def get_connections_and_virtual_interfaces(client, lag_id): virtual_interfaces = describe_virtual_interfaces(client, lag_id) - connections = lag_status(client, lag_id=lag_id).get('connections', []) + connections = lag_status(client, lag_id=lag_id).get("connections", []) return virtual_interfaces, connections def disassociate_vis(client, lag_id, virtual_interfaces): for vi in virtual_interfaces: - delete_virtual_interface(client, vi['virtualInterfaceId']) + delete_virtual_interface(client, vi["virtualInterfaceId"]) try: - response = client.delete_virtual_interface(virtualInterfaceId=vi['virtualInterfaceId']) + response = client.delete_virtual_interface(virtualInterfaceId=vi["virtualInterfaceId"]) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id), + last_traceback=traceback.format_exc(), + exception=e, + ) def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout): @@ -370,32 +383,38 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id) # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete - if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete: - raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. " - "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). " - "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True " - "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id), - last_traceback=None, - exception=None) + if any((latest_status["minimumLinks"], virtual_interfaces, connections)) and not force_delete: + raise DirectConnectError( + msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. " + "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). " + "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True " + "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id), + last_traceback=None, + exception=None, + ) # update min_links to be 0 so we can remove the LAG update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout) # if virtual_interfaces and not delete_vi_with_disassociation: Raise failure; can't delete while vi attached for connection in connections: - disassociate_connection_and_lag(client, connection['connectionId'], lag_id) + disassociate_connection_and_lag(client, connection["connectionId"], lag_id) if delete_with_disassociation: - delete_connection(client, connection['connectionId']) + delete_connection(client, connection["connectionId"]) for vi in virtual_interfaces: - delete_virtual_interface(client, vi['virtualInterfaceId']) + delete_virtual_interface(client, vi["virtualInterfaceId"]) start_time = time.time() while True: try: delete_lag(client, lag_id) except DirectConnectError as e: - if ('until its Virtual Interfaces are deleted' in e.exception) and (time.time() - start_time < wait_timeout) and wait: + if ( + ("until its Virtual Interfaces are deleted" in e.exception) + and (time.time() - start_time < wait_timeout) + and wait + ): continue else: return True @@ -403,54 +422,58 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(), link_aggregation_group_id=dict(), - num_connections=dict(type='int'), - min_links=dict(type='int'), + num_connections=dict(type="int"), + min_links=dict(type="int"), location=dict(), bandwidth=dict(), connection_id=dict(), - delete_with_disassociation=dict(type='bool', default=False), - force_delete=dict(type='bool', default=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=120), + delete_with_disassociation=dict(type="bool", default=False), + force_delete=dict(type="bool", default=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=120), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_one_of=[('link_aggregation_group_id', 'name')], - required_if=[('state', 'present', ('location', 'bandwidth'))], + required_one_of=[("link_aggregation_group_id", "name")], + required_if=[("state", "present", ("location", "bandwidth"))], ) try: - connection = module.client('directconnect') + connection = module.client("directconnect") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - state = module.params.get('state') + state = module.params.get("state") response = {} try: - if state == 'present': - changed, lag_id = ensure_present(connection, - num_connections=module.params.get("num_connections"), - lag_id=module.params.get("link_aggregation_group_id"), - lag_name=module.params.get("name"), - location=module.params.get("location"), - bandwidth=module.params.get("bandwidth"), - connection_id=module.params.get("connection_id"), - min_links=module.params.get("min_links"), - wait=module.params.get("wait"), - wait_timeout=module.params.get("wait_timeout")) + if state == "present": + changed, lag_id = ensure_present( + connection, + num_connections=module.params.get("num_connections"), + lag_id=module.params.get("link_aggregation_group_id"), + lag_name=module.params.get("name"), + location=module.params.get("location"), + bandwidth=module.params.get("bandwidth"), + connection_id=module.params.get("connection_id"), + min_links=module.params.get("min_links"), + wait=module.params.get("wait"), + wait_timeout=module.params.get("wait_timeout"), + ) response = lag_status(connection, lag_id) elif state == "absent": - changed = ensure_absent(connection, - lag_id=module.params.get("link_aggregation_group_id"), - lag_name=module.params.get("name"), - force_delete=module.params.get("force_delete"), - delete_with_disassociation=module.params.get("delete_with_disassociation"), - wait=module.params.get('wait'), - wait_timeout=module.params.get('wait_timeout')) + changed = ensure_absent( + connection, + lag_id=module.params.get("link_aggregation_group_id"), + lag_name=module.params.get("name"), + force_delete=module.params.get("force_delete"), + delete_with_disassociation=module.params.get("delete_with_disassociation"), + wait=module.params.get("wait"), + wait_timeout=module.params.get("wait_timeout"), + ) except DirectConnectError as e: if e.last_traceback: module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception)) @@ -460,5 +483,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/directconnect_virtual_interface.py b/plugins/modules/directconnect_virtual_interface.py index bded2ab57ab..ab6ee9d4ea4 100644 --- a/plugins/modules/directconnect_virtual_interface.py +++ b/plugins/modules/directconnect_virtual_interface.py @@ -264,61 +264,66 @@ def try_except_ClientError(failure_msg): - ''' - Wrapper for boto3 calls that uses AWSRetry and handles exceptions - ''' + """ + Wrapper for boto3 calls that uses AWSRetry and handles exceptions + """ + def wrapper(f): def run_func(*args, **kwargs): try: - result = AWSRetry.jittered_backoff(retries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs) + result = AWSRetry.jittered_backoff( + retries=8, delay=5, catch_extra_error_codes=["DirectConnectClientException"] + )(f)(*args, **kwargs) except (ClientError, BotoCoreError) as e: raise DirectConnectError(failure_msg, traceback.format_exc(), e) return result + return run_func + return wrapper def find_unique_vi(client, connection_id, virtual_interface_id, name): - ''' - Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found. - If multiple matches are found False is returned. If no matches are found None is returned. - ''' + """ + Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found. + If multiple matches are found False is returned. If no matches are found None is returned. + """ # Get the virtual interfaces, filtering by the ID if provided. vi_params = {} if virtual_interface_id: - vi_params = {'virtualInterfaceId': virtual_interface_id} + vi_params = {"virtualInterfaceId": virtual_interface_id} - virtual_interfaces = try_except_ClientError( - failure_msg="Failed to describe virtual interface")( - client.describe_virtual_interfaces)(**vi_params).get('virtualInterfaces') + virtual_interfaces = try_except_ClientError(failure_msg="Failed to describe virtual interface")( + client.describe_virtual_interfaces + )(**vi_params).get("virtualInterfaces") # Remove deleting/deleted matches from the results. - virtual_interfaces = [vi for vi in virtual_interfaces if vi['virtualInterfaceState'] not in ('deleting', 'deleted')] + virtual_interfaces = [vi for vi in virtual_interfaces if vi["virtualInterfaceState"] not in ("deleting", "deleted")] matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id) return exact_match(matching_virtual_interfaces) def exact_match(virtual_interfaces): - ''' - Returns the virtual interface ID if one was found, - None if the virtual interface ID needs to be created, - False if an exact match was not found - ''' + """ + Returns the virtual interface ID if one was found, + None if the virtual interface ID needs to be created, + False if an exact match was not found + """ if not virtual_interfaces: return None if len(virtual_interfaces) == 1: - return virtual_interfaces[0]['virtualInterfaceId'] + return virtual_interfaces[0]["virtualInterfaceId"] else: return False def filter_virtual_interfaces(virtual_interfaces, name, connection_id): - ''' - Filters the available virtual interfaces to try to find a unique match - ''' + """ + Filters the available virtual interfaces to try to find a unique match + """ # Filter by name if provided. if name: matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name) @@ -339,52 +344,56 @@ def filter_virtual_interfaces(virtual_interfaces, name, connection_id): def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id): - ''' - Return virtual interfaces that have the connection_id associated - ''' - return [vi for vi in virtual_interfaces if vi['connectionId'] == connection_id] + """ + Return virtual interfaces that have the connection_id associated + """ + return [vi for vi in virtual_interfaces if vi["connectionId"] == connection_id] def find_virtual_interface_by_name(virtual_interfaces, name): - ''' - Return virtual interfaces that match the provided name - ''' - return [vi for vi in virtual_interfaces if vi['virtualInterfaceName'] == name] + """ + Return virtual interfaces that match the provided name + """ + return [vi for vi in virtual_interfaces if vi["virtualInterfaceName"] == name] def vi_state(client, virtual_interface_id): - ''' - Returns the state of the virtual interface. - ''' + """ + Returns the state of the virtual interface. + """ err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id) - vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)(virtualInterfaceId=virtual_interface_id) - return vi['virtualInterfaces'][0] + vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)( + virtualInterfaceId=virtual_interface_id + ) + return vi["virtualInterfaces"][0] def assemble_params_for_creating_vi(params): - ''' - Returns kwargs to use in the call to create the virtual interface - - Params for public virtual interfaces: - virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr - Params for private virtual interfaces: - virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId - ''' - - public = params['public'] - name = params['name'] - vlan = params['vlan'] - bgp_asn = params['bgp_asn'] - auth_key = params['authentication_key'] - amazon_addr = params['amazon_address'] - customer_addr = params['customer_address'] - family_addr = params['address_type'] - cidr = params['cidr'] - virtual_gateway_id = params['virtual_gateway_id'] - direct_connect_gateway_id = params['direct_connect_gateway_id'] + """ + Returns kwargs to use in the call to create the virtual interface + + Params for public virtual interfaces: + virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr + Params for private virtual interfaces: + virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId + """ + + public = params["public"] + name = params["name"] + vlan = params["vlan"] + bgp_asn = params["bgp_asn"] + auth_key = params["authentication_key"] + amazon_addr = params["amazon_address"] + customer_addr = params["customer_address"] + family_addr = params["address_type"] + cidr = params["cidr"] + virtual_gateway_id = params["virtual_gateway_id"] + direct_connect_gateway_id = params["direct_connect_gateway_id"] parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn) - opt_params = dict(authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr) + opt_params = dict( + authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr + ) for name, value in opt_params.items(): if value: @@ -392,68 +401,74 @@ def assemble_params_for_creating_vi(params): # virtual interface type specific parameters if public and cidr: - parameters['routeFilterPrefixes'] = [{'cidr': c} for c in cidr] + parameters["routeFilterPrefixes"] = [{"cidr": c} for c in cidr] if not public: if virtual_gateway_id: - parameters['virtualGatewayId'] = virtual_gateway_id + parameters["virtualGatewayId"] = virtual_gateway_id elif direct_connect_gateway_id: - parameters['directConnectGatewayId'] = direct_connect_gateway_id + parameters["directConnectGatewayId"] = direct_connect_gateway_id return parameters def create_vi(client, public, associated_id, creation_params): - ''' - :param public: a boolean - :param associated_id: a link aggregation group ID or connection ID to associate - with the virtual interface. - :param creation_params: a dict of parameters to use in the AWS SDK call - :return The ID of the created virtual interface - ''' + """ + :param public: a boolean + :param associated_id: a link aggregation group ID or connection ID to associate + with the virtual interface. + :param creation_params: a dict of parameters to use in the AWS SDK call + :return The ID of the created virtual interface + """ err_msg = "Failed to create virtual interface" if public: - vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)(connectionId=associated_id, - newPublicVirtualInterface=creation_params) + vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)( + connectionId=associated_id, newPublicVirtualInterface=creation_params + ) else: - vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)(connectionId=associated_id, - newPrivateVirtualInterface=creation_params) - return vi['virtualInterfaceId'] + vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)( + connectionId=associated_id, newPrivateVirtualInterface=creation_params + ) + return vi["virtualInterfaceId"] def modify_vi(client, virtual_interface_id, connection_id): - ''' - Associate a new connection ID - ''' + """ + Associate a new connection ID + """ err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id) - try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)(virtualInterfaceId=virtual_interface_id, - connectionId=connection_id) + try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)( + virtualInterfaceId=virtual_interface_id, connectionId=connection_id + ) def needs_modification(client, virtual_interface_id, connection_id): - ''' - Determine if the associated connection ID needs to be updated - ''' - return vi_state(client, virtual_interface_id).get('connectionId') != connection_id + """ + Determine if the associated connection ID needs to be updated + """ + return vi_state(client, virtual_interface_id).get("connectionId") != connection_id def ensure_state(connection, module): changed = False - state = module.params['state'] - connection_id = module.params['id_to_associate'] - public = module.params['public'] - name = module.params['name'] + state = module.params["state"] + connection_id = module.params["id_to_associate"] + public = module.params["public"] + name = module.params["name"] - virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get('virtual_interface_id'), name) + virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get("virtual_interface_id"), name) if virtual_interface_id is False: - module.fail_json(msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, " - "and connection_id options if applicable to find a unique match.") + module.fail_json( + msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, " + "and connection_id options if applicable to find a unique match." + ) - if state == 'present': - - if not virtual_interface_id and module.params['virtual_interface_id']: - module.fail_json(msg="The virtual interface {0} does not exist.".format(module.params['virtual_interface_id'])) + if state == "present": + if not virtual_interface_id and module.params["virtual_interface_id"]: + module.fail_json( + msg="The virtual interface {0} does not exist.".format(module.params["virtual_interface_id"]) + ) elif not virtual_interface_id: assembled_params = assemble_params_for_creating_vi(module.params) @@ -478,31 +493,35 @@ def ensure_state(connection, module): def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), - id_to_associate=dict(required=True, aliases=['link_aggregation_group_id', 'connection_id']), - public=dict(type='bool'), + state=dict(required=True, choices=["present", "absent"]), + id_to_associate=dict(required=True, aliases=["link_aggregation_group_id", "connection_id"]), + public=dict(type="bool"), name=dict(), - vlan=dict(type='int', default=100), - bgp_asn=dict(type='int', default=65000), + vlan=dict(type="int", default=100), + bgp_asn=dict(type="int", default=65000), authentication_key=dict(no_log=True), amazon_address=dict(), customer_address=dict(), address_type=dict(), - cidr=dict(type='list', elements='str'), + cidr=dict(type="list", elements="str"), virtual_gateway_id=dict(), direct_connect_gateway_id=dict(), - virtual_interface_id=dict() + virtual_interface_id=dict(), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_one_of=[['virtual_interface_id', 'name']], - required_if=[['state', 'present', ['public']], - ['public', True, ['amazon_address']], - ['public', True, ['customer_address']], - ['public', True, ['cidr']]], - mutually_exclusive=[['virtual_gateway_id', 'direct_connect_gateway_id']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_one_of=[["virtual_interface_id", "name"]], + required_if=[ + ["state", "present", ["public"]], + ["public", True, ["amazon_address"]], + ["public", True, ["customer_address"]], + ["public", True, ["cidr"]], + ], + mutually_exclusive=[["virtual_gateway_id", "direct_connect_gateway_id"]], + ) - connection = module.client('directconnect') + connection = module.client("directconnect") try: changed, latest_state = ensure_state(connection, module) @@ -515,5 +534,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dms_endpoint.py b/plugins/modules/dms_endpoint.py index 66b5dd9b2c3..b417003689d 100644 --- a/plugins/modules/dms_endpoint.py +++ b/plugins/modules/dms_endpoint.py @@ -346,8 +346,8 @@ @AWSRetry.jittered_backoff(**backoff_params) def dms_describe_tags(connection, **params): - """ checks if the endpoint exists """ - tags = connection.list_tags_for_resource(**params).get('TagList', []) + """checks if the endpoint exists""" + tags = connection.list_tags_for_resource(**params).get("TagList", []) return boto3_tag_list_to_ansible_dict(tags) @@ -355,15 +355,14 @@ def dms_describe_tags(connection, **params): def dms_describe_endpoints(connection, **params): try: endpoints = connection.describe_endpoints(**params) - except is_boto3_error_code('ResourceNotFoundFault'): + except is_boto3_error_code("ResourceNotFoundFault"): return None - return endpoints.get('Endpoints', None) + return endpoints.get("Endpoints", None) def describe_endpoint(connection, endpoint_identifier): - """ checks if the endpoint exists """ - endpoint_filter = dict(Name='endpoint-id', - Values=[endpoint_identifier]) + """checks if the endpoint exists""" + endpoint_filter = dict(Name="endpoint-id", Values=[endpoint_identifier]) try: endpoints = dms_describe_endpoints(connection, Filters=[endpoint_filter]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -374,8 +373,8 @@ def describe_endpoint(connection, endpoint_identifier): endpoint = endpoints[0] try: - tags = dms_describe_tags(connection, ResourceArn=endpoint['EndpointArn']) - endpoint['tags'] = tags + tags = dms_describe_tags(connection, ResourceArn=endpoint["EndpointArn"]) + endpoint["tags"] = tags except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe the DMS endpoint tags") return endpoint @@ -384,7 +383,7 @@ def describe_endpoint(connection, endpoint_identifier): @AWSRetry.jittered_backoff(**backoff_params) def dms_delete_endpoint(client, **params): """deletes the DMS endpoint based on the EndpointArn""" - if module.params.get('wait'): + if module.params.get("wait"): return delete_dms_endpoint(client) else: return client.delete_endpoint(**params) @@ -392,19 +391,19 @@ def dms_delete_endpoint(client, **params): @AWSRetry.jittered_backoff(**backoff_params) def dms_create_endpoint(client, **params): - """ creates the DMS endpoint""" + """creates the DMS endpoint""" return client.create_endpoint(**params) @AWSRetry.jittered_backoff(**backoff_params) def dms_modify_endpoint(client, **params): - """ updates the endpoint""" + """updates the endpoint""" return client.modify_endpoint(**params) @AWSRetry.jittered_backoff(**backoff_params) def get_endpoint_deleted_waiter(client): - return client.get_waiter('endpoint_deleted') + return client.get_waiter("endpoint_deleted") @AWSRetry.jittered_backoff(**backoff_params) @@ -418,32 +417,22 @@ def dms_add_tags(client, **params): def endpoint_exists(endpoint): - """ Returns boolean based on the existence of the endpoint + """Returns boolean based on the existence of the endpoint :param endpoint: dict containing the described endpoint :return: bool """ - return bool(len(endpoint['Endpoints'])) + return bool(len(endpoint["Endpoints"])) def delete_dms_endpoint(connection, endpoint_arn): try: - delete_arn = dict( - EndpointArn=endpoint_arn - ) - if module.params.get('wait'): - + delete_arn = dict(EndpointArn=endpoint_arn) + if module.params.get("wait"): delete_output = connection.delete_endpoint(**delete_arn) delete_waiter = get_endpoint_deleted_waiter(connection) delete_waiter.wait( - Filters=[{ - 'Name': 'endpoint-arn', - 'Values': [endpoint_arn] - - }], - WaiterConfig={ - 'Delay': module.params.get('timeout'), - 'MaxAttempts': module.params.get('retries') - } + Filters=[{"Name": "endpoint-arn", "Values": [endpoint_arn]}], + WaiterConfig={"Delay": module.params.get("timeout"), "MaxAttempts": module.params.get("retries")}, ) return delete_output else: @@ -458,71 +447,62 @@ def create_module_params(): :return: dict """ endpoint_parameters = dict( - EndpointIdentifier=module.params.get('endpointidentifier'), - EndpointType=module.params.get('endpointtype'), - EngineName=module.params.get('enginename'), - Username=module.params.get('username'), - Password=module.params.get('password'), - ServerName=module.params.get('servername'), - Port=module.params.get('port'), - DatabaseName=module.params.get('databasename'), - SslMode=module.params.get('sslmode') + EndpointIdentifier=module.params.get("endpointidentifier"), + EndpointType=module.params.get("endpointtype"), + EngineName=module.params.get("enginename"), + Username=module.params.get("username"), + Password=module.params.get("password"), + ServerName=module.params.get("servername"), + Port=module.params.get("port"), + DatabaseName=module.params.get("databasename"), + SslMode=module.params.get("sslmode"), ) - if module.params.get('EndpointArn'): - endpoint_parameters['EndpointArn'] = module.params.get('EndpointArn') - if module.params.get('certificatearn'): - endpoint_parameters['CertificateArn'] = \ - module.params.get('certificatearn') + if module.params.get("EndpointArn"): + endpoint_parameters["EndpointArn"] = module.params.get("EndpointArn") + if module.params.get("certificatearn"): + endpoint_parameters["CertificateArn"] = module.params.get("certificatearn") - if module.params.get('dmstransfersettings'): - endpoint_parameters['DmsTransferSettings'] = \ - module.params.get('dmstransfersettings') + if module.params.get("dmstransfersettings"): + endpoint_parameters["DmsTransferSettings"] = module.params.get("dmstransfersettings") - if module.params.get('extraconnectionattributes'): - endpoint_parameters['ExtraConnectionAttributes'] =\ - module.params.get('extraconnectionattributes') + if module.params.get("extraconnectionattributes"): + endpoint_parameters["ExtraConnectionAttributes"] = module.params.get("extraconnectionattributes") - if module.params.get('kmskeyid'): - endpoint_parameters['KmsKeyId'] = module.params.get('kmskeyid') + if module.params.get("kmskeyid"): + endpoint_parameters["KmsKeyId"] = module.params.get("kmskeyid") - if module.params.get('tags'): - endpoint_parameters['Tags'] = module.params.get('tags') + if module.params.get("tags"): + endpoint_parameters["Tags"] = module.params.get("tags") - if module.params.get('serviceaccessrolearn'): - endpoint_parameters['ServiceAccessRoleArn'] = \ - module.params.get('serviceaccessrolearn') + if module.params.get("serviceaccessrolearn"): + endpoint_parameters["ServiceAccessRoleArn"] = module.params.get("serviceaccessrolearn") - if module.params.get('externaltabledefinition'): - endpoint_parameters['ExternalTableDefinition'] = \ - module.params.get('externaltabledefinition') + if module.params.get("externaltabledefinition"): + endpoint_parameters["ExternalTableDefinition"] = module.params.get("externaltabledefinition") - if module.params.get('dynamodbsettings'): - endpoint_parameters['DynamoDbSettings'] = \ - module.params.get('dynamodbsettings') + if module.params.get("dynamodbsettings"): + endpoint_parameters["DynamoDbSettings"] = module.params.get("dynamodbsettings") - if module.params.get('s3settings'): - endpoint_parameters['S3Settings'] = module.params.get('s3settings') + if module.params.get("s3settings"): + endpoint_parameters["S3Settings"] = module.params.get("s3settings") - if module.params.get('mongodbsettings'): - endpoint_parameters['MongoDbSettings'] = \ - module.params.get('mongodbsettings') + if module.params.get("mongodbsettings"): + endpoint_parameters["MongoDbSettings"] = module.params.get("mongodbsettings") - if module.params.get('kinesissettings'): - endpoint_parameters['KinesisSettings'] = \ - module.params.get('kinesissettings') + if module.params.get("kinesissettings"): + endpoint_parameters["KinesisSettings"] = module.params.get("kinesissettings") - if module.params.get('elasticsearchsettings'): - endpoint_parameters['ElasticsearchSettings'] = \ - module.params.get('elasticsearchsettings') + if module.params.get("elasticsearchsettings"): + endpoint_parameters["ElasticsearchSettings"] = module.params.get("elasticsearchsettings") - if module.params.get('wait'): - endpoint_parameters['wait'] = module.boolean(module.params.get('wait')) + if module.params.get("wait"): + endpoint_parameters["wait"] = module.boolean(module.params.get("wait")) - if module.params.get('timeout'): - endpoint_parameters['timeout'] = module.params.get('timeout') + if module.params.get("timeout"): + endpoint_parameters["timeout"] = module.params.get("timeout") - if module.params.get('retries'): - endpoint_parameters['retries'] = module.params.get('retries') + if module.params.get("retries"): + endpoint_parameters["retries"] = module.params.get("retries") return endpoint_parameters @@ -538,14 +518,16 @@ def compare_params(param_described): param_described = dict(param_described) modparams = create_module_params() # modify can't update tags - param_described.pop('Tags', None) - modparams.pop('Tags', None) + param_described.pop("Tags", None) + modparams.pop("Tags", None) changed = False for paramname in modparams: - if paramname == 'Password' or paramname in param_described \ - and param_described[paramname] == modparams[paramname] or \ - str(param_described[paramname]).lower() \ - == modparams[paramname]: + if ( + paramname == "Password" + or paramname in param_described + and param_described[paramname] == modparams[paramname] + or str(param_described[paramname]).lower() == modparams[paramname] + ): pass else: changed = True @@ -553,25 +535,24 @@ def compare_params(param_described): def modify_dms_endpoint(connection, endpoint): - arn = endpoint['EndpointArn'] + arn = endpoint["EndpointArn"] try: params = create_module_params() # modify can't update tags - params.pop('Tags', None) + params.pop("Tags", None) return dms_modify_endpoint(connection, EndpointArn=arn, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update DMS endpoint.", params=params) def ensure_tags(connection, endpoint): - desired_tags = module.params.get('tags', None) + desired_tags = module.params.get("tags", None) if desired_tags is None: return False - current_tags = endpoint.get('tags', {}) + current_tags = endpoint.get("tags", {}) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, - module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, module.params.get("purge_tags")) if not tags_to_remove and not tags_to_add: return False @@ -579,7 +560,7 @@ def ensure_tags(connection, endpoint): if module.check_mode: return True - arn = endpoint.get('EndpointArn') + arn = endpoint.get("EndpointArn") try: if tags_to_remove: @@ -609,36 +590,49 @@ def create_dms_endpoint(connection): def main(): argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), + state=dict(choices=["present", "absent"], default="present"), endpointidentifier=dict(required=True), - endpointtype=dict(choices=['source', 'target']), - enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb', - 'aurora', 'redshift', 's3', 'db2', 'azuredb', - 'sybase', 'dynamodb', 'mongodb', 'sqlserver'], - required=False), + endpointtype=dict(choices=["source", "target"]), + enginename=dict( + choices=[ + "mysql", + "oracle", + "postgres", + "mariadb", + "aurora", + "redshift", + "s3", + "db2", + "azuredb", + "sybase", + "dynamodb", + "mongodb", + "sqlserver", + ], + required=False, + ), username=dict(), password=dict(no_log=True), servername=dict(), - port=dict(type='int'), + port=dict(type="int"), databasename=dict(), extraconnectionattributes=dict(), kmskeyid=dict(no_log=False), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), certificatearn=dict(), - sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'], - default='none'), + sslmode=dict(choices=["none", "require", "verify-ca", "verify-full"], default="none"), serviceaccessrolearn=dict(), externaltabledefinition=dict(), - dynamodbsettings=dict(type='dict'), - s3settings=dict(type='dict'), - dmstransfersettings=dict(type='dict'), - mongodbsettings=dict(type='dict'), - kinesissettings=dict(type='dict'), - elasticsearchsettings=dict(type='dict'), - wait=dict(type='bool', default=False), - timeout=dict(type='int'), - retries=dict(type='int') + dynamodbsettings=dict(type="dict"), + s3settings=dict(type="dict"), + dmstransfersettings=dict(type="dict"), + mongodbsettings=dict(type="dict"), + kinesissettings=dict(type="dict"), + elasticsearchsettings=dict(type="dict"), + wait=dict(type="bool", default=False), + timeout=dict(type="int"), + retries=dict(type="int"), ) global module module = AnsibleAWSModule( @@ -650,49 +644,48 @@ def main(): ["wait", "True", ["timeout"]], ["wait", "True", ["retries"]], ], - supports_check_mode=False + supports_check_mode=False, ) exit_message = None changed = False - state = module.params.get('state') + state = module.params.get("state") - dmsclient = module.client('dms') - endpoint = describe_endpoint(dmsclient, - module.params.get('endpointidentifier')) - if state == 'present': + dmsclient = module.client("dms") + endpoint = describe_endpoint(dmsclient, module.params.get("endpointidentifier")) + if state == "present": if endpoint: changed |= ensure_tags(dmsclient, endpoint) params_changed = compare_params(endpoint) if params_changed: updated_dms = modify_dms_endpoint(dmsclient, endpoint) exit_message = updated_dms - endpoint = exit_message.get('Endpoint') + endpoint = exit_message.get("Endpoint") changed = True else: exit_message = "Endpoint Already Exists" else: exit_message = create_dms_endpoint(dmsclient) - endpoint = exit_message.get('Endpoint') + endpoint = exit_message.get("Endpoint") changed = True if changed: # modify and create don't return tags - tags = dms_describe_tags(dmsclient, ResourceArn=endpoint['EndpointArn']) - endpoint['tags'] = tags - elif state == 'absent': + tags = dms_describe_tags(dmsclient, ResourceArn=endpoint["EndpointArn"]) + endpoint["tags"] = tags + elif state == "absent": if endpoint: - delete_results = delete_dms_endpoint(dmsclient, endpoint['EndpointArn']) + delete_results = delete_dms_endpoint(dmsclient, endpoint["EndpointArn"]) exit_message = delete_results endpoint = None changed = True else: changed = False - exit_message = 'DMS Endpoint does not exist' + exit_message = "DMS Endpoint does not exist" - endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=['tags']) + endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=["tags"]) module.exit_json(changed=changed, endpoint=endpoint, msg=exit_message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dms_replication_subnet_group.py b/plugins/modules/dms_replication_subnet_group.py index bd75df6d67b..7135aa14ea6 100644 --- a/plugins/modules/dms_replication_subnet_group.py +++ b/plugins/modules/dms_replication_subnet_group.py @@ -73,16 +73,15 @@ def describe_subnet_group(connection, subnet_group): """checks if instance exists""" try: - subnet_group_filter = dict(Name='replication-subnet-group-id', - Values=[subnet_group]) + subnet_group_filter = dict(Name="replication-subnet-group-id", Values=[subnet_group]) return connection.describe_replication_subnet_groups(Filters=[subnet_group_filter]) except botocore.exceptions.ClientError: - return {'ReplicationSubnetGroups': []} + return {"ReplicationSubnetGroups": []} @AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_create(connection, **params): - """ creates the replication subnet group """ + """creates the replication subnet group""" return connection.create_replication_subnet_group(**params) @@ -93,17 +92,17 @@ def replication_subnet_group_modify(connection, **modify_params): @AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_delete(module, connection): - subnetid = module.params.get('identifier') + subnetid = module.params.get("identifier") delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid) return connection.delete_replication_subnet_group(**delete_parameters) def replication_subnet_exists(subnet): - """ Returns boolean based on the existence of the endpoint + """Returns boolean based on the existence of the endpoint :param endpoint: dict containing the described endpoint :return: bool """ - return bool(len(subnet['ReplicationSubnetGroups'])) + return bool(len(subnet["ReplicationSubnetGroups"])) def create_module_params(module): @@ -113,9 +112,9 @@ def create_module_params(module): """ instance_parameters = dict( # ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API - ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(), - ReplicationSubnetGroupDescription=module.params.get('description'), - SubnetIds=module.params.get('subnet_ids'), + ReplicationSubnetGroupIdentifier=module.params.get("identifier").lower(), + ReplicationSubnetGroupDescription=module.params.get("description"), + SubnetIds=module.params.get("subnet_ids"), ) return instance_parameters @@ -132,19 +131,18 @@ def compare_params(module, param_described): modparams = create_module_params(module) changed = False # need to sanitize values that get returned from the API - if 'VpcId' in param_described.keys(): - param_described.pop('VpcId') - if 'SubnetGroupStatus' in param_described.keys(): - param_described.pop('SubnetGroupStatus') + if "VpcId" in param_described.keys(): + param_described.pop("VpcId") + if "SubnetGroupStatus" in param_described.keys(): + param_described.pop("SubnetGroupStatus") for paramname in modparams.keys(): - if paramname in param_described.keys() and \ - param_described.get(paramname) == modparams[paramname]: + if paramname in param_described.keys() and param_described.get(paramname) == modparams[paramname]: pass - elif paramname == 'SubnetIds': + elif paramname == "SubnetIds": subnets = [] - for subnet in param_described.get('Subnets'): - subnets.append(subnet.get('SubnetIdentifier')) - for modulesubnet in modparams['SubnetIds']: + for subnet in param_described.get("Subnets"): + subnets.append(subnet.get("SubnetIdentifier")) + for modulesubnet in modparams["SubnetIds"]: if modulesubnet in subnets: pass else: @@ -170,23 +168,19 @@ def modify_replication_subnet_group(module, connection): def main(): argument_spec = dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - identifier=dict(type='str', required=True), - description=dict(type='str', required=True), - subnet_ids=dict(type='list', elements='str', required=True), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True + state=dict(type="str", choices=["present", "absent"], default="present"), + identifier=dict(type="str", required=True), + description=dict(type="str", required=True), + subnet_ids=dict(type="list", elements="str", required=True), ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) exit_message = None changed = False - state = module.params.get('state') - dmsclient = module.client('dms') - subnet_group = describe_subnet_group(dmsclient, - module.params.get('identifier')) - if state == 'present': + state = module.params.get("state") + dmsclient = module.client("dms") + subnet_group = describe_subnet_group(dmsclient, module.params.get("identifier")) + if state == "present": if replication_subnet_exists(subnet_group): if compare_params(module, subnet_group["ReplicationSubnetGroups"][0]): if not module.check_mode: @@ -203,7 +197,7 @@ def main(): else: exit_message = "Check mode enabled" - elif state == 'absent': + elif state == "absent": if replication_subnet_exists(subnet_group): if not module.check_mode: replication_subnet_group_delete(module, dmsclient) @@ -220,5 +214,5 @@ def main(): module.exit_json(changed=changed, msg=exit_message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dynamodb_table.py b/plugins/modules/dynamodb_table.py index a059198d858..a9503735557 100644 --- a/plugins/modules/dynamodb_table.py +++ b/plugins/modules/dynamodb_table.py @@ -259,12 +259,19 @@ from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -DYNAMO_TYPE_DEFAULT = 'STRING' -INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name'] -INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity'] -INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only'] +DYNAMO_TYPE_DEFAULT = "STRING" +INDEX_REQUIRED_OPTIONS = ["name", "type", "hash_key_name"] +INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + [ + "hash_key_type", + "range_key_name", + "range_key_type", + "includes", + "read_capacity", + "write_capacity", +] +INDEX_TYPE_OPTIONS = ["all", "global_all", "global_include", "global_keys_only", "include", "keys_only"] # Map in both directions -DYNAMO_TYPE_MAP_LONG = {'STRING': 'S', 'NUMBER': 'N', 'BINARY': 'B'} +DYNAMO_TYPE_MAP_LONG = {"STRING": "S", "NUMBER": "N", "BINARY": "B"} DYNAMO_TYPE_MAP_SHORT = dict((v, k) for k, v in DYNAMO_TYPE_MAP_LONG.items()) KEY_TYPE_CHOICES = list(DYNAMO_TYPE_MAP_LONG.keys()) @@ -273,58 +280,63 @@ # LimitExceededException/ResourceInUseException exceptions at you. This can be # pretty slow, so add plenty of retries... @AWSRetry.jittered_backoff( - retries=45, delay=5, max_delay=30, - catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'], + retries=45, + delay=5, + max_delay=30, + catch_extra_error_codes=["LimitExceededException", "ResourceInUseException", "ResourceNotFoundException"], ) def _update_table_with_long_retry(**changes): - return client.update_table( - TableName=module.params.get('name'), - **changes - ) + return client.update_table(TableName=module.params.get("name"), **changes) # ResourceNotFoundException is expected here if the table doesn't exist -@AWSRetry.jittered_backoff(catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"]) def _describe_table(**params): return client.describe_table(**params) def wait_exists(): - table_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') + table_name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") delay = min(wait_timeout, 5) max_attempts = wait_timeout // delay try: - waiter = client.get_waiter('table_exists') + waiter = client.get_waiter("table_exists") waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, TableName=table_name, ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on table creation') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed while waiting on table creation') + module.fail_json_aws(e, msg="Timeout while waiting on table creation") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while waiting on table creation") def wait_not_exists(): - table_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') + table_name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") delay = min(wait_timeout, 5) max_attempts = wait_timeout // delay try: - waiter = client.get_waiter('table_not_exists') + waiter = client.get_waiter("table_not_exists") waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, TableName=table_name, ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on table deletion') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed while waiting on table deletion') + module.fail_json_aws(e, msg="Timeout while waiting on table deletion") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while waiting on table deletion") def _short_type_to_long(short_key): @@ -360,21 +372,21 @@ def _decode_primary_index(current_table): # The schema/attribute definitions are a list of dicts which need the same # treatment as boto3's tag lists schema = boto3_tag_list_to_ansible_dict( - current_table.get('key_schema', []), + current_table.get("key_schema", []), # Map from 'HASH'/'RANGE' to attribute name - tag_name_key_name='key_type', - tag_value_key_name='attribute_name', + tag_name_key_name="key_type", + tag_value_key_name="attribute_name", ) attributes = boto3_tag_list_to_ansible_dict( - current_table.get('attribute_definitions', []), + current_table.get("attribute_definitions", []), # Map from attribute name to 'S'/'N'/'B'. - tag_name_key_name='attribute_name', - tag_value_key_name='attribute_type', + tag_name_key_name="attribute_name", + tag_value_key_name="attribute_type", ) - hash_key_name = schema.get('HASH') + hash_key_name = schema.get("HASH") hash_key_type = _short_type_to_long(attributes.get(hash_key_name, None)) - range_key_name = schema.get('RANGE', None) + range_key_name = schema.get("RANGE", None) range_key_type = _short_type_to_long(attributes.get(range_key_name, None)) return dict( @@ -385,56 +397,56 @@ def _decode_primary_index(current_table): ) -def _decode_index(index_data, attributes, type_prefix=''): +def _decode_index(index_data, attributes, type_prefix=""): try: index_map = dict( - name=index_data['index_name'], + name=index_data["index_name"], ) index_data = dict(index_data) - index_data['attribute_definitions'] = attributes + index_data["attribute_definitions"] = attributes index_map.update(_decode_primary_index(index_data)) - throughput = index_data.get('provisioned_throughput', {}) - index_map['provisioned_throughput'] = throughput + throughput = index_data.get("provisioned_throughput", {}) + index_map["provisioned_throughput"] = throughput if throughput: - index_map['read_capacity'] = throughput.get('read_capacity_units') - index_map['write_capacity'] = throughput.get('write_capacity_units') + index_map["read_capacity"] = throughput.get("read_capacity_units") + index_map["write_capacity"] = throughput.get("write_capacity_units") - projection = index_data.get('projection', {}) + projection = index_data.get("projection", {}) if projection: - index_map['type'] = type_prefix + projection.get('projection_type') - index_map['includes'] = projection.get('non_key_attributes', []) + index_map["type"] = type_prefix + projection.get("projection_type") + index_map["includes"] = projection.get("non_key_attributes", []) return index_map except Exception as e: - module.fail_json_aws(e, msg='Decode failure', index_data=index_data) + module.fail_json_aws(e, msg="Decode failure", index_data=index_data) def compatability_results(current_table): if not current_table: return dict() - billing_mode = current_table.get('billing_mode') + billing_mode = current_table.get("billing_mode") primary_indexes = _decode_primary_index(current_table) - hash_key_name = primary_indexes.get('hash_key_name') - hash_key_type = primary_indexes.get('hash_key_type') - range_key_name = primary_indexes.get('range_key_name') - range_key_type = primary_indexes.get('range_key_type') + hash_key_name = primary_indexes.get("hash_key_name") + hash_key_type = primary_indexes.get("hash_key_type") + range_key_name = primary_indexes.get("range_key_name") + range_key_type = primary_indexes.get("range_key_type") indexes = list() - global_indexes = current_table.get('_global_index_map', {}) - local_indexes = current_table.get('_local_index_map', {}) + global_indexes = current_table.get("_global_index_map", {}) + local_indexes = current_table.get("_local_index_map", {}) for index in global_indexes: idx = dict(global_indexes[index]) - idx.pop('provisioned_throughput', None) + idx.pop("provisioned_throughput", None) indexes.append(idx) for index in local_indexes: idx = dict(local_indexes[index]) - idx.pop('provisioned_throughput', None) + idx.pop("provisioned_throughput", None) indexes.append(idx) compat_results = dict( @@ -445,72 +457,78 @@ def compatability_results(current_table): indexes=indexes, billing_mode=billing_mode, region=module.region, - table_name=current_table.get('table_name', None), - table_class=current_table.get('table_class_summary', {}).get('table_class', None), - table_status=current_table.get('table_status', None), - tags=current_table.get('tags', {}), + table_name=current_table.get("table_name", None), + table_class=current_table.get("table_class_summary", {}).get("table_class", None), + table_status=current_table.get("table_status", None), + tags=current_table.get("tags", {}), ) if billing_mode == "PROVISIONED": - throughput = current_table.get('provisioned_throughput', {}) - compat_results['read_capacity'] = throughput.get('read_capacity_units', None) - compat_results['write_capacity'] = throughput.get('write_capacity_units', None) + throughput = current_table.get("provisioned_throughput", {}) + compat_results["read_capacity"] = throughput.get("read_capacity_units", None) + compat_results["write_capacity"] = throughput.get("write_capacity_units", None) return compat_results def get_dynamodb_table(): - table_name = module.params.get('name') + table_name = module.params.get("name") try: table = _describe_table(TableName=table_name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to describe table') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe table") - table = table['Table'] + table = table["Table"] try: - tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table['TableArn'])['Tags'] - except is_boto3_error_code('AccessDeniedException'): - module.warn('Permission denied when listing tags') + tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table["TableArn"])["Tags"] + except is_boto3_error_code("AccessDeniedException"): + module.warn("Permission denied when listing tags") tags = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to list table tags') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to list table tags") tags = boto3_tag_list_to_ansible_dict(tags) table = camel_dict_to_snake_dict(table) # Put some of the values into places people will expect them - table['arn'] = table['table_arn'] - table['name'] = table['table_name'] - table['status'] = table['table_status'] - table['id'] = table['table_id'] - table['size'] = table['table_size_bytes'] - table['tags'] = tags + table["arn"] = table["table_arn"] + table["name"] = table["table_name"] + table["status"] = table["table_status"] + table["id"] = table["table_id"] + table["size"] = table["table_size_bytes"] + table["tags"] = tags - if 'table_class_summary' in table: - table['table_class'] = table['table_class_summary']['table_class'] + if "table_class_summary" in table: + table["table_class"] = table["table_class_summary"]["table_class"] # billing_mode_summary doesn't always seem to be set but is always set for PAY_PER_REQUEST # and when updating the billing_mode - if 'billing_mode_summary' in table: - table['billing_mode'] = table['billing_mode_summary']['billing_mode'] + if "billing_mode_summary" in table: + table["billing_mode"] = table["billing_mode_summary"]["billing_mode"] else: - table['billing_mode'] = "PROVISIONED" + table["billing_mode"] = "PROVISIONED" # convert indexes into something we can easily search against - attributes = table['attribute_definitions'] + attributes = table["attribute_definitions"] global_index_map = dict() local_index_map = dict() - for index in table.get('global_secondary_indexes', []): - idx = _decode_index(index, attributes, type_prefix='global_') - global_index_map[idx['name']] = idx - for index in table.get('local_secondary_indexes', []): + for index in table.get("global_secondary_indexes", []): + idx = _decode_index(index, attributes, type_prefix="global_") + global_index_map[idx["name"]] = idx + for index in table.get("local_secondary_indexes", []): idx = _decode_index(index, attributes) - local_index_map[idx['name']] = idx - table['_global_index_map'] = global_index_map - table['_local_index_map'] = local_index_map + local_index_map[idx["name"]] = idx + table["_global_index_map"] = global_index_map + table["_local_index_map"] = local_index_map return table @@ -521,19 +539,19 @@ def _generate_attribute_map(): """ attributes = dict() - for index in (module.params, *module.params.get('indexes')): + for index in (module.params, *module.params.get("indexes")): # run through hash_key_name and range_key_name - for t in ['hash', 'range']: - key_name = index.get(t + '_key_name') + for t in ["hash", "range"]: + key_name = index.get(t + "_key_name") if not key_name: continue - key_type = index.get(t + '_key_type') or DYNAMO_TYPE_DEFAULT + key_type = index.get(t + "_key_type") or DYNAMO_TYPE_DEFAULT _type = _long_type_to_short(key_type) if key_name in attributes: if _type != attributes[key_name]: - module.fail_json(msg='Conflicting attribute type', - type_1=_type, type_2=attributes[key_name], - key_name=key_name) + module.fail_json( + msg="Conflicting attribute type", type_1=_type, type_2=attributes[key_name], key_name=key_name + ) else: attributes[key_name] = _type @@ -546,9 +564,7 @@ def _generate_attributes(): # Use ansible_dict_to_boto3_tag_list to generate the list of dicts # format we need attrs = ansible_dict_to_boto3_tag_list( - attributes, - tag_name_key_name='AttributeName', - tag_value_key_name='AttributeType' + attributes, tag_name_key_name="AttributeName", tag_value_key_name="AttributeType" ) return list(attrs) @@ -557,8 +573,8 @@ def _generate_throughput(params=None): if not params: params = module.params - read_capacity = params.get('read_capacity') or 1 - write_capacity = params.get('write_capacity') or 1 + read_capacity = params.get("read_capacity") or 1 + write_capacity = params.get("write_capacity") or 1 throughput = dict( ReadCapacityUnits=read_capacity, WriteCapacityUnits=write_capacity, @@ -572,56 +588,54 @@ def _generate_schema(params=None): params = module.params schema = list() - hash_key_name = params.get('hash_key_name') - range_key_name = params.get('range_key_name') + hash_key_name = params.get("hash_key_name") + range_key_name = params.get("range_key_name") if hash_key_name: - entry = _schema_dict(hash_key_name, 'HASH') + entry = _schema_dict(hash_key_name, "HASH") schema.append(entry) if range_key_name: - entry = _schema_dict(range_key_name, 'RANGE') + entry = _schema_dict(range_key_name, "RANGE") schema.append(entry) return schema def _primary_index_changes(current_table): - primary_index = _decode_primary_index(current_table) - hash_key_name = primary_index.get('hash_key_name') - _hash_key_name = module.params.get('hash_key_name') - hash_key_type = primary_index.get('hash_key_type') - _hash_key_type = module.params.get('hash_key_type') - range_key_name = primary_index.get('range_key_name') - _range_key_name = module.params.get('range_key_name') - range_key_type = primary_index.get('range_key_type') - _range_key_type = module.params.get('range_key_type') + hash_key_name = primary_index.get("hash_key_name") + _hash_key_name = module.params.get("hash_key_name") + hash_key_type = primary_index.get("hash_key_type") + _hash_key_type = module.params.get("hash_key_type") + range_key_name = primary_index.get("range_key_name") + _range_key_name = module.params.get("range_key_name") + range_key_type = primary_index.get("range_key_type") + _range_key_type = module.params.get("range_key_type") changed = list() if _hash_key_name and (_hash_key_name != hash_key_name): - changed.append('hash_key_name') + changed.append("hash_key_name") if _hash_key_type and (_hash_key_type != hash_key_type): - changed.append('hash_key_type') + changed.append("hash_key_type") if _range_key_name and (_range_key_name != range_key_name): - changed.append('range_key_name') + changed.append("range_key_name") if _range_key_type and (_range_key_type != range_key_type): - changed.append('range_key_type') + changed.append("range_key_type") return changed def _throughput_changes(current_table, params=None): - if not params: params = module.params - throughput = current_table.get('provisioned_throughput', {}) - read_capacity = throughput.get('read_capacity_units', None) - _read_capacity = params.get('read_capacity') or read_capacity - write_capacity = throughput.get('write_capacity_units', None) - _write_capacity = params.get('write_capacity') or write_capacity + throughput = current_table.get("provisioned_throughput", {}) + read_capacity = throughput.get("read_capacity_units", None) + _read_capacity = params.get("read_capacity") or read_capacity + write_capacity = throughput.get("write_capacity_units", None) + _write_capacity = params.get("write_capacity") or write_capacity if (read_capacity != _read_capacity) or (write_capacity != _write_capacity): return dict( @@ -641,14 +655,14 @@ def _generate_global_indexes(billing_mode): if billing_mode == "PAY_PER_REQUEST": include_throughput = False - for index in module.params.get('indexes'): - if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']: + for index in module.params.get("indexes"): + if index.get("type") not in ["global_all", "global_include", "global_keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in index_exists: - module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name)) + module.fail_json(msg="Duplicate key {0} in list of global indexes".format(name)) # Convert the type name to upper case and remove the global_ - index['type'] = index['type'].upper()[7:] + index["type"] = index["type"].upper()[7:] index = _generate_index(index, include_throughput) index_exists[name] = True indexes.append(index) @@ -660,14 +674,14 @@ def _generate_local_indexes(): index_exists = dict() indexes = list() - for index in module.params.get('indexes'): + for index in module.params.get("indexes"): index = dict() - if index.get('type') not in ['all', 'include', 'keys_only']: + if index.get("type") not in ["all", "include", "keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in index_exists: - module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name)) - index['type'] = index['type'].upper() + module.fail_json(msg="Duplicate key {0} in list of local indexes".format(name)) + index["type"] = index["type"].upper() index = _generate_index(index, False) index_exists[name] = True indexes.append(index) @@ -677,32 +691,32 @@ def _generate_local_indexes(): def _generate_global_index_map(current_table): global_index_map = dict() - existing_indexes = current_table['_global_index_map'] - for index in module.params.get('indexes'): - if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']: + existing_indexes = current_table["_global_index_map"] + for index in module.params.get("indexes"): + if index.get("type") not in ["global_all", "global_include", "global_keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in global_index_map: - module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name)) + module.fail_json(msg="Duplicate key {0} in list of global indexes".format(name)) idx = _merge_index_params(index, existing_indexes.get(name, {})) # Convert the type name to upper case and remove the global_ - idx['type'] = idx['type'].upper()[7:] + idx["type"] = idx["type"].upper()[7:] global_index_map[name] = idx return global_index_map def _generate_local_index_map(current_table): local_index_map = dict() - existing_indexes = current_table['_local_index_map'] - for index in module.params.get('indexes'): - if index.get('type') not in ['all', 'include', 'keys_only']: + existing_indexes = current_table["_local_index_map"] + for index in module.params.get("indexes"): + if index.get("type") not in ["all", "include", "keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in local_index_map: - module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name)) + module.fail_json(msg="Duplicate key {0} in list of local indexes".format(name)) idx = _merge_index_params(index, existing_indexes.get(name, {})) # Convert the type name to upper case - idx['type'] = idx['type'].upper() + idx["type"] = idx["type"].upper() local_index_map[name] = idx return local_index_map @@ -710,27 +724,28 @@ def _generate_local_index_map(current_table): def _generate_index(index, include_throughput=True): key_schema = _generate_schema(index) throughput = _generate_throughput(index) - non_key_attributes = index['includes'] or [] + non_key_attributes = index["includes"] or [] projection = dict( - ProjectionType=index['type'], + ProjectionType=index["type"], ) - if index['type'] != 'ALL': + if index["type"] != "ALL": if non_key_attributes: - projection['NonKeyAttributes'] = non_key_attributes + projection["NonKeyAttributes"] = non_key_attributes else: if non_key_attributes: module.fail_json( "DynamoDB does not support specifying non-key-attributes ('includes') for " - "indexes of type 'all'. Index name: {0}".format(index['name'])) + "indexes of type 'all'. Index name: {0}".format(index["name"]) + ) idx = dict( - IndexName=index['name'], + IndexName=index["name"], KeySchema=key_schema, Projection=projection, ) if include_throughput: - idx['ProvisionedThroughput'] = throughput + idx["ProvisionedThroughput"] = throughput return idx @@ -741,15 +756,15 @@ def _attribute_changes(current_table): def _global_index_changes(current_table): - current_global_index_map = current_table['_global_index_map'] + current_global_index_map = current_table["_global_index_map"] global_index_map = _generate_global_index_map(current_table) - current_billing_mode = current_table.get('billing_mode') + current_billing_mode = current_table.get("billing_mode") - if module.params.get('billing_mode') is None: + if module.params.get("billing_mode") is None: billing_mode = current_billing_mode else: - billing_mode = module.params.get('billing_mode') + billing_mode = module.params.get("billing_mode") include_throughput = True @@ -760,7 +775,6 @@ def _global_index_changes(current_table): # TODO (future) it would be nice to add support for deleting an index for name in global_index_map: - idx = dict(_generate_index(global_index_map[name], include_throughput=include_throughput)) if name not in current_global_index_map: index_changes.append(dict(Create=idx)) @@ -797,37 +811,37 @@ def _update_table(current_table): # Get throughput / billing_mode changes throughput_changes = _throughput_changes(current_table) if throughput_changes: - changes['ProvisionedThroughput'] = throughput_changes + changes["ProvisionedThroughput"] = throughput_changes - current_billing_mode = current_table.get('billing_mode') - new_billing_mode = module.params.get('billing_mode') + current_billing_mode = current_table.get("billing_mode") + new_billing_mode = module.params.get("billing_mode") if new_billing_mode is None: new_billing_mode = current_billing_mode if current_billing_mode != new_billing_mode: - changes['BillingMode'] = new_billing_mode + changes["BillingMode"] = new_billing_mode # Update table_class use exisiting if none is defined - if module.params.get('table_class'): - if module.params.get('table_class') != current_table.get('table_class'): - changes['TableClass'] = module.params.get('table_class') + if module.params.get("table_class"): + if module.params.get("table_class") != current_table.get("table_class"): + changes["TableClass"] = module.params.get("table_class") global_index_changes = _global_index_changes(current_table) if global_index_changes: - changes['GlobalSecondaryIndexUpdates'] = global_index_changes + changes["GlobalSecondaryIndexUpdates"] = global_index_changes # Only one index can be changed at a time except if changing the billing mode, pass the first during the # main update and deal with the others on a slow retry to wait for # completion if current_billing_mode == new_billing_mode: if len(global_index_changes) > 1: - changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]] + changes["GlobalSecondaryIndexUpdates"] = [global_index_changes[0]] additional_global_index_changes = global_index_changes[1:] local_index_changes = _local_index_changes(current_table) if local_index_changes: - changes['LocalSecondaryIndexUpdates'] = local_index_changes + changes["LocalSecondaryIndexUpdates"] = local_index_changes if not changes: return False @@ -836,38 +850,41 @@ def _update_table(current_table): return True if global_index_changes or local_index_changes: - changes['AttributeDefinitions'] = _generate_attributes() + changes["AttributeDefinitions"] = _generate_attributes() try: - client.update_table( - aws_retry=True, - TableName=module.params.get('name'), - **changes - ) + client.update_table(aws_retry=True, TableName=module.params.get("name"), **changes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update table") if additional_global_index_changes: for index in additional_global_index_changes: try: - _update_table_with_long_retry(GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes['AttributeDefinitions']) + _update_table_with_long_retry( + GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes["AttributeDefinitions"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update table", changes=changes, - additional_global_index_changes=additional_global_index_changes) - - if module.params.get('wait'): + module.fail_json_aws( + e, + msg="Failed to update table", + changes=changes, + additional_global_index_changes=additional_global_index_changes, + ) + + if module.params.get("wait"): wait_exists() return True def _update_tags(current_table): - _tags = module.params.get('tags') + _tags = module.params.get("tags") if _tags is None: return False - tags_to_add, tags_to_remove = compare_aws_tags(current_table['tags'], module.params.get('tags'), - purge_tags=module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags( + current_table["tags"], module.params.get("tags"), purge_tags=module.params.get("purge_tags") + ) # If neither need updating we can return already if not (tags_to_add or tags_to_remove): @@ -880,7 +897,7 @@ def _update_tags(current_table): try: client.tag_resource( aws_retry=True, - ResourceArn=current_table['arn'], + ResourceArn=current_table["arn"], Tags=ansible_dict_to_boto3_tag_list(tags_to_add), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -889,7 +906,7 @@ def _update_tags(current_table): try: client.untag_resource( aws_retry=True, - ResourceArn=current_table['arn'], + ResourceArn=current_table["arn"], TagKeys=tags_to_remove, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -901,28 +918,32 @@ def _update_tags(current_table): def update_table(current_table): primary_index_changes = _primary_index_changes(current_table) if primary_index_changes: - module.fail_json("DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {0}".format(primary_index_changes)) + module.fail_json( + "DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {0}".format( + primary_index_changes + ) + ) changed = False changed |= _update_table(current_table) changed |= _update_tags(current_table) - if module.params.get('wait'): + if module.params.get("wait"): wait_exists() return changed def create_table(): - table_name = module.params.get('name') - table_class = module.params.get('table_class') - hash_key_name = module.params.get('hash_key_name') - billing_mode = module.params.get('billing_mode') + table_name = module.params.get("name") + table_class = module.params.get("table_class") + hash_key_name = module.params.get("hash_key_name") + billing_mode = module.params.get("billing_mode") if billing_mode is None: billing_mode = "PROVISIONED" - tags = ansible_dict_to_boto3_tag_list(module.params.get('tags') or {}) + tags = ansible_dict_to_boto3_tag_list(module.params.get("tags") or {}) if not hash_key_name: module.fail_json('"hash_key_name" must be provided when creating a new table.') @@ -950,20 +971,20 @@ def create_table(): ) if table_class: - params['TableClass'] = table_class + params["TableClass"] = table_class if billing_mode == "PROVISIONED": - params['ProvisionedThroughput'] = throughput + params["ProvisionedThroughput"] = throughput if local_indexes: - params['LocalSecondaryIndexes'] = local_indexes + params["LocalSecondaryIndexes"] = local_indexes if global_indexes: - params['GlobalSecondaryIndexes'] = global_indexes + params["GlobalSecondaryIndexes"] = global_indexes try: client.create_table(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to create table') + module.fail_json_aws(e, msg="Failed to create table") - if module.params.get('wait'): + if module.params.get("wait"): wait_exists() return True @@ -976,30 +997,34 @@ def delete_table(current_table): if module.check_mode: return True - table_name = module.params.get('name') + table_name = module.params.get("name") # If an index is mid-update then we have to wait for the update to complete # before deletion will succeed long_retry = AWSRetry.jittered_backoff( - retries=45, delay=5, max_delay=30, - catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException'], + retries=45, + delay=5, + max_delay=30, + catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"], ) try: long_retry(client.delete_table)(TableName=table_name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to delete table') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to delete table") - if module.params.get('wait'): + if module.params.get("wait"): wait_not_exists() return True def main(): - global module global client @@ -1007,36 +1032,36 @@ def main(): # different parameters, use a separate namespace for names, # and local indexes can't be updated. index_options = dict( - name=dict(type='str', required=True), + name=dict(type="str", required=True), # It would be nice to make this optional, but because Local and Global # indexes are mixed in here we need this to be able to tell to which # group of indexes the index belongs. - type=dict(type='str', required=True, choices=INDEX_TYPE_OPTIONS), - hash_key_name=dict(type='str', required=False), - hash_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES), - range_key_name=dict(type='str', required=False), - range_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES), - includes=dict(type='list', required=False, elements='str'), - read_capacity=dict(type='int', required=False), - write_capacity=dict(type='int', required=False), + type=dict(type="str", required=True, choices=INDEX_TYPE_OPTIONS), + hash_key_name=dict(type="str", required=False), + hash_key_type=dict(type="str", required=False, choices=KEY_TYPE_CHOICES), + range_key_name=dict(type="str", required=False), + range_key_type=dict(type="str", required=False, choices=KEY_TYPE_CHOICES), + includes=dict(type="list", required=False, elements="str"), + read_capacity=dict(type="int", required=False), + write_capacity=dict(type="int", required=False), ) argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - name=dict(required=True, type='str'), - hash_key_name=dict(type='str'), - hash_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), - range_key_name=dict(type='str'), - range_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), - billing_mode=dict(type='str', choices=['PROVISIONED', 'PAY_PER_REQUEST']), - read_capacity=dict(type='int'), - write_capacity=dict(type='int'), - indexes=dict(default=[], type='list', elements='dict', options=index_options), - table_class=dict(type='str', choices=['STANDARD', 'STANDARD_INFREQUENT_ACCESS']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=300, type='int', aliases=['wait_for_active_timeout']), + state=dict(default="present", choices=["present", "absent"]), + name=dict(required=True, type="str"), + hash_key_name=dict(type="str"), + hash_key_type=dict(type="str", choices=KEY_TYPE_CHOICES), + range_key_name=dict(type="str"), + range_key_type=dict(type="str", choices=KEY_TYPE_CHOICES), + billing_mode=dict(type="str", choices=["PROVISIONED", "PAY_PER_REQUEST"]), + read_capacity=dict(type="int"), + write_capacity=dict(type="int"), + indexes=dict(default=[], type="list", elements="dict", options=index_options), + table_class=dict(type="str", choices=["STANDARD", "STANDARD_INFREQUENT_ACCESS"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=300, type="int", aliases=["wait_for_active_timeout"]), ) module = AnsibleAWSModule( @@ -1046,41 +1071,41 @@ def main(): ) retry_decorator = AWSRetry.jittered_backoff( - catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'], + catch_extra_error_codes=["LimitExceededException", "ResourceInUseException", "ResourceNotFoundException"], ) - client = module.client('dynamodb', retry_decorator=retry_decorator) + client = module.client("dynamodb", retry_decorator=retry_decorator) - if module.params.get('table_class'): - module.require_botocore_at_least('1.23.18', reason='to set table_class') + if module.params.get("table_class"): + module.require_botocore_at_least("1.23.18", reason="to set table_class") current_table = get_dynamodb_table() changed = False table = None results = dict() - state = module.params.get('state') - if state == 'present': + state = module.params.get("state") + if state == "present": if current_table: changed |= update_table(current_table) else: changed |= create_table() table = get_dynamodb_table() - elif state == 'absent': + elif state == "absent": changed |= delete_table(current_table) compat_results = compatability_results(table) if compat_results: results.update(compat_results) - results['changed'] = changed + results["changed"] = changed if table: # These are used to pass computed data about, not needed for users - table.pop('_global_index_map', None) - table.pop('_local_index_map', None) - results['table'] = table + table.pop("_global_index_map", None) + table.pop("_local_index_map", None) + results["table"] = table module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dynamodb_ttl.py b/plugins/modules/dynamodb_ttl.py index bd1c7789903..eca236cf49a 100644 --- a/plugins/modules/dynamodb_ttl.py +++ b/plugins/modules/dynamodb_ttl.py @@ -71,48 +71,48 @@ def get_current_ttl_state(c, table_name): - '''Fetch the state dict for a table.''' + """Fetch the state dict for a table.""" current_state = c.describe_time_to_live(TableName=table_name) - return current_state.get('TimeToLiveDescription') + return current_state.get("TimeToLiveDescription") def does_state_need_changing(attribute_name, desired_state, current_spec): - '''Run checks to see if the table needs to be modified. Basically a dirty check.''' + """Run checks to see if the table needs to be modified. Basically a dirty check.""" if not current_spec: # we don't have an entry (or a table?) return True - if desired_state.lower() == 'enable' and current_spec.get('TimeToLiveStatus') not in ['ENABLING', 'ENABLED']: + if desired_state.lower() == "enable" and current_spec.get("TimeToLiveStatus") not in ["ENABLING", "ENABLED"]: return True - if desired_state.lower() == 'disable' and current_spec.get('TimeToLiveStatus') not in ['DISABLING', 'DISABLED']: + if desired_state.lower() == "disable" and current_spec.get("TimeToLiveStatus") not in ["DISABLING", "DISABLED"]: return True - if attribute_name != current_spec.get('AttributeName'): + if attribute_name != current_spec.get("AttributeName"): return True return False def set_ttl_state(c, table_name, state, attribute_name): - '''Set our specification. Returns the update_time_to_live specification dict, - which is different than the describe_* call.''' + """Set our specification. Returns the update_time_to_live specification dict, + which is different than the describe_* call.""" is_enabled = False - if state.lower() == 'enable': + if state.lower() == "enable": is_enabled = True ret = c.update_time_to_live( TableName=table_name, TimeToLiveSpecification={ - 'Enabled': is_enabled, - 'AttributeName': attribute_name - } + "Enabled": is_enabled, + "AttributeName": attribute_name, + }, ) - return ret.get('TimeToLiveSpecification') + return ret.get("TimeToLiveSpecification") def main(): argument_spec = dict( - state=dict(choices=['enable', 'disable']), + state=dict(choices=["enable", "disable"]), table_name=dict(required=True), attribute_name=dict(required=True), ) @@ -121,26 +121,28 @@ def main(): ) try: - dbclient = module.client('dynamodb') + dbclient = module.client("dynamodb") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - result = {'changed': False} - state = module.params['state'] + result = {"changed": False} + state = module.params["state"] # wrap all our calls to catch the standard exceptions. We don't pass `module` in to the # methods so it's easier to do here. try: - current_state = get_current_ttl_state(dbclient, module.params['table_name']) + current_state = get_current_ttl_state(dbclient, module.params["table_name"]) - if does_state_need_changing(module.params['attribute_name'], module.params['state'], current_state): + if does_state_need_changing(module.params["attribute_name"], module.params["state"], current_state): # changes needed - new_state = set_ttl_state(dbclient, module.params['table_name'], module.params['state'], module.params['attribute_name']) - result['current_status'] = new_state - result['changed'] = True + new_state = set_ttl_state( + dbclient, module.params["table_name"], module.params["state"], module.params["attribute_name"] + ) + result["current_status"] = new_state + result["changed"] = True else: # no changes needed - result['current_status'] = current_state + result["current_status"] = current_state except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Failed to get or update ttl state") @@ -152,5 +154,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_ami_copy.py b/plugins/modules/ec2_ami_copy.py index 5af897cdb80..5d7e49bde90 100644 --- a/plugins/modules/ec2_ami_copy.py +++ b/plugins/modules/ec2_ami_copy.py @@ -157,67 +157,67 @@ def copy_image(module, ec2): image = None changed = False - tags = module.params.get('tags') - - params = {'SourceRegion': module.params.get('source_region'), - 'SourceImageId': module.params.get('source_image_id'), - 'Name': module.params.get('name'), - 'Description': module.params.get('description'), - 'Encrypted': module.params.get('encrypted'), - } - if module.params.get('kms_key_id'): - params['KmsKeyId'] = module.params.get('kms_key_id') + tags = module.params.get("tags") + + params = { + "SourceRegion": module.params.get("source_region"), + "SourceImageId": module.params.get("source_image_id"), + "Name": module.params.get("name"), + "Description": module.params.get("description"), + "Encrypted": module.params.get("encrypted"), + } + if module.params.get("kms_key_id"): + params["KmsKeyId"] = module.params.get("kms_key_id") try: - if module.params.get('tag_equality'): - filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()] - filters.append(dict(Name='state', Values=['available', 'pending'])) + if module.params.get("tag_equality"): + filters = [{"Name": "tag:%s" % k, "Values": [v]} for (k, v) in module.params.get("tags").items()] + filters.append(dict(Name="state", Values=["available", "pending"])) images = ec2.describe_images(Filters=filters) - if len(images['Images']) > 0: - image = images['Images'][0] + if len(images["Images"]) > 0: + image = images["Images"][0] if not image: image = ec2.copy_image(**params) - image_id = image['ImageId'] + image_id = image["ImageId"] if tags: - ec2.create_tags(Resources=[image_id], - Tags=ansible_dict_to_boto3_tag_list(tags)) + ec2.create_tags(Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags)) changed = True - if module.params.get('wait'): + if module.params.get("wait"): delay = 15 - max_attempts = module.params.get('wait_timeout') // delay - image_id = image.get('ImageId') - ec2.get_waiter('image_available').wait( - ImageIds=[image_id], - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + max_attempts = module.params.get("wait_timeout") // delay + image_id = image.get("ImageId") + ec2.get_waiter("image_available").wait( + ImageIds=[image_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts} ) module.exit_json(changed=changed, **camel_dict_to_snake_dict(image)) except WaiterError as e: - module.fail_json_aws(e, msg='An error occurred waiting for the image to become available') + module.fail_json_aws(e, msg="An error occurred waiting for the image to become available") except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not copy AMI") except Exception as e: - module.fail_json(msg='Unhandled exception. (%s)' % to_native(e)) + module.fail_json(msg="Unhandled exception. (%s)" % to_native(e)) def main(): argument_spec = dict( source_region=dict(required=True), source_image_id=dict(required=True), - name=dict(default='default'), - description=dict(default=''), - encrypted=dict(type='bool', default=False, required=False), - kms_key_id=dict(type='str', required=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=600), - tags=dict(type='dict', aliases=['resource_tags']), - tag_equality=dict(type='bool', default=False)) + name=dict(default="default"), + description=dict(default=""), + encrypted=dict(type="bool", default=False, required=False), + kms_key_id=dict(type="str", required=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=600), + tags=dict(type="dict", aliases=["resource_tags"]), + tag_equality=dict(type="bool", default=False), + ) module = AnsibleAWSModule(argument_spec=argument_spec) - ec2 = module.client('ec2') + ec2 = module.client("ec2") copy_image(module, ec2) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_carrier_gateway.py b/plugins/modules/ec2_carrier_gateway.py index 3458170e393..6517879c0f8 100644 --- a/plugins/modules/ec2_carrier_gateway.py +++ b/plugins/modules/ec2_carrier_gateway.py @@ -159,9 +159,7 @@ def get_matching_cagw(self, vpc_id, carrier_gateway_id=None): cagw = None if len(cagws) > 1: - self._module.fail_json( - msg=f"EC2 returned more than one Carrier Gateway for VPC {vpc_id}, aborting" - ) + self._module.fail_json(msg=f"EC2 returned more than one Carrier Gateway for VPC {vpc_id}, aborting") elif cagws: cagw = camel_dict_to_snake_dict(cagws[0]) diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py index b858f9c4ead..19fc8eab7f5 100644 --- a/plugins/modules/ec2_customer_gateway.py +++ b/plugins/modules/ec2_customer_gateway.py @@ -121,21 +121,17 @@ class Ec2CustomerGatewayManager: - def __init__(self, module): self.module = module try: - self.ec2 = module.client('ec2') + self.ec2 = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState']) + @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=["IncorrectState"]) def ensure_cgw_absent(self, gw_id): - response = self.ec2.delete_customer_gateway( - DryRun=False, - CustomerGatewayId=gw_id - ) + response = self.ec2.delete_customer_gateway(DryRun=False, CustomerGatewayId=gw_id) return response def ensure_cgw_present(self, bgp_asn, ip_address): @@ -143,7 +139,7 @@ def ensure_cgw_present(self, bgp_asn, ip_address): bgp_asn = 65000 response = self.ec2.create_customer_gateway( DryRun=False, - Type='ipsec.1', + Type="ipsec.1", PublicIp=ip_address, BgpAsn=bgp_asn, ) @@ -156,11 +152,8 @@ def tag_cgw_name(self, gw_id, name): gw_id, ], Tags=[ - { - 'Key': 'Name', - 'Value': name - }, - ] + {"Key": "Name", "Value": name}, + ], ) return response @@ -169,86 +162,84 @@ def describe_gateways(self, ip_address): DryRun=False, Filters=[ { - 'Name': 'state', - 'Values': [ - 'available', - ] + "Name": "state", + "Values": [ + "available", + ], }, { - 'Name': 'ip-address', - 'Values': [ + "Name": "ip-address", + "Values": [ ip_address, - ] - } - ] + ], + }, + ], ) return response def main(): argument_spec = dict( - bgp_asn=dict(required=False, type='int'), + bgp_asn=dict(required=False, type="int"), ip_address=dict(required=True), name=dict(required=True), - routing=dict(default='dynamic', choices=['dynamic', 'static']), - state=dict(default='present', choices=['present', 'absent']), + routing=dict(default="dynamic", choices=["dynamic", "static"]), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ - ('routing', 'dynamic', ['bgp_asn']) - ] + ("routing", "dynamic", ["bgp_asn"]), + ], ) gw_mgr = Ec2CustomerGatewayManager(module) - name = module.params.get('name') + name = module.params.get("name") - existing = gw_mgr.describe_gateways(module.params['ip_address']) + existing = gw_mgr.describe_gateways(module.params["ip_address"]) results = dict(changed=False) - if module.params['state'] == 'present': - if existing['CustomerGateways']: - existing['CustomerGateway'] = existing['CustomerGateways'][0] - results['gateway'] = existing - if existing['CustomerGateway']['Tags']: - tag_array = existing['CustomerGateway']['Tags'] + if module.params["state"] == "present": + if existing["CustomerGateways"]: + existing["CustomerGateway"] = existing["CustomerGateways"][0] + results["gateway"] = existing + if existing["CustomerGateway"]["Tags"]: + tag_array = existing["CustomerGateway"]["Tags"] for key, value in enumerate(tag_array): - if value['Key'] == 'Name': - current_name = value['Value'] + if value["Key"] == "Name": + current_name = value["Value"] if current_name != name: - results['name'] = gw_mgr.tag_cgw_name( - results['gateway']['CustomerGateway']['CustomerGatewayId'], - module.params['name'], + results["name"] = gw_mgr.tag_cgw_name( + results["gateway"]["CustomerGateway"]["CustomerGatewayId"], + module.params["name"], ) - results['changed'] = True + results["changed"] = True else: if not module.check_mode: - results['gateway'] = gw_mgr.ensure_cgw_present( - module.params['bgp_asn'], - module.params['ip_address'], + results["gateway"] = gw_mgr.ensure_cgw_present( + module.params["bgp_asn"], + module.params["ip_address"], ) - results['name'] = gw_mgr.tag_cgw_name( - results['gateway']['CustomerGateway']['CustomerGatewayId'], - module.params['name'], + results["name"] = gw_mgr.tag_cgw_name( + results["gateway"]["CustomerGateway"]["CustomerGatewayId"], + module.params["name"], ) - results['changed'] = True + results["changed"] = True - elif module.params['state'] == 'absent': - if existing['CustomerGateways']: - existing['CustomerGateway'] = existing['CustomerGateways'][0] - results['gateway'] = existing + elif module.params["state"] == "absent": + if existing["CustomerGateways"]: + existing["CustomerGateway"] = existing["CustomerGateways"][0] + results["gateway"] = existing if not module.check_mode: - results['gateway'] = gw_mgr.ensure_cgw_absent( - existing['CustomerGateway']['CustomerGatewayId'] - ) - results['changed'] = True + results["gateway"] = gw_mgr.ensure_cgw_absent(existing["CustomerGateway"]["CustomerGatewayId"]) + results["changed"] = True pretty_results = camel_dict_to_snake_dict(results) module.exit_json(**pretty_results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_customer_gateway_info.py b/plugins/modules/ec2_customer_gateway_info.py index d0674f52b19..976d3f370d2 100644 --- a/plugins/modules/ec2_customer_gateway_info.py +++ b/plugins/modules/ec2_customer_gateway_info.py @@ -95,44 +95,46 @@ def date_handler(obj): - return obj.isoformat() if hasattr(obj, 'isoformat') else obj + return obj.isoformat() if hasattr(obj, "isoformat") else obj def list_customer_gateways(connection, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + params["CustomerGatewayIds"] = module.params.get("customer_gateway_ids") try: result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler)) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not describe customer gateways") - snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']] + snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result["CustomerGateways"]] if snaked_customer_gateways: for customer_gateway in snaked_customer_gateways: - customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', [])) - customer_gateway_name = customer_gateway['tags'].get('Name') + customer_gateway["tags"] = boto3_tag_list_to_ansible_dict(customer_gateway.get("tags", [])) + customer_gateway_name = customer_gateway["tags"].get("Name") if customer_gateway_name: - customer_gateway['customer_gateway_name'] = customer_gateway_name + customer_gateway["customer_gateway_name"] = customer_gateway_name module.exit_json(changed=False, customer_gateways=snaked_customer_gateways) def main(): - argument_spec = dict( - customer_gateway_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict') + customer_gateway_ids=dict(default=[], type="list", elements="str"), filters=dict(default={}, type="dict") ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['customer_gateway_ids', 'filters']], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ["customer_gateway_ids", "filters"], + ], + supports_check_mode=True, + ) - connection = module.client('ec2') + connection = module.client("ec2") list_customer_gateways(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_launch_template.py b/plugins/modules/ec2_launch_template.py index b807d3aa09f..8e1240d285f 100644 --- a/plugins/modules/ec2_launch_template.py +++ b/plugins/modules/ec2_launch_template.py @@ -446,60 +446,85 @@ def determine_iam_role(module, name_or_arn): - if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn): - return {'arn': name_or_arn} - iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + if re.match(r"^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$", name_or_arn): + return {"arn": name_or_arn} + iam = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) try: role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) - return {'arn': role['InstanceProfile']['Arn']} - except is_boto3_error_code('NoSuchEntity') as e: + return {"arn": role["InstanceProfile"]["Arn"]} + except is_boto3_error_code("NoSuchEntity") as e: module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn)) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn)) + module.fail_json_aws( + e, + msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format( + name_or_arn + ), + ) def existing_templates(module): - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) matches = None try: - if module.params.get('template_id'): - matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')], aws_retry=True) - elif module.params.get('template_name'): - matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')], aws_retry=True) - except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e: + if module.params.get("template_id"): + matches = ec2.describe_launch_templates( + LaunchTemplateIds=[module.params.get("template_id")], aws_retry=True + ) + elif module.params.get("template_name"): + matches = ec2.describe_launch_templates( + LaunchTemplateNames=[module.params.get("template_name")], aws_retry=True + ) + except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException") as e: # no named template was found, return nothing/empty versions return None, [] - except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format( - module.params.get('launch_template_id'))) - except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidLaunchTemplateId.Malformed") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="Launch template with ID {0} is not a valid ID. It should start with `lt-....`".format( + module.params.get("launch_template_id") + ), + ) + except is_boto3_error_code("InvalidLaunchTemplateId.NotFoundException") as e: # pylint: disable=duplicate-except module.fail_json_aws( - e, msg='Launch template with ID {0} could not be found, please supply a name ' - 'instead so that a new template can be created'.format(module.params.get('launch_template_id'))) + e, + msg="Launch template with ID {0} could not be found, please supply a name " + "instead so that a new template can be created".format(module.params.get("launch_template_id")), + ) except (ClientError, BotoCoreError, WaiterError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.') + module.fail_json_aws(e, msg="Could not check existing launch templates. This may be an IAM permission problem.") else: - template = matches['LaunchTemplates'][0] - template_id, template_version, template_default = template['LaunchTemplateId'], template['LatestVersionNumber'], template['DefaultVersionNumber'] + template = matches["LaunchTemplates"][0] + template_id, template_version, template_default = ( + template["LaunchTemplateId"], + template["LatestVersionNumber"], + template["DefaultVersionNumber"], + ) try: - return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)['LaunchTemplateVersions'] + return ( + template, + ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)[ + "LaunchTemplateVersions" + ], + ) except (ClientError, BotoCoreError, WaiterError) as e: - module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(template['LaunchTemplateName'], template_id)) + module.fail_json_aws( + e, + msg="Could not find launch template versions for {0} (ID: {1}).".format( + template["LaunchTemplateName"], template_id + ), + ) def params_to_launch_data(module, template_params): - if template_params.get('tags'): - tag_list = ansible_dict_to_boto3_tag_list(template_params.get('tags')) - template_params['tag_specifications'] = [ - { - 'resource_type': r_type, - 'tags': tag_list - } - for r_type in ('instance', 'volume') + if template_params.get("tags"): + tag_list = ansible_dict_to_boto3_tag_list(template_params.get("tags")) + template_params["tag_specifications"] = [ + {"resource_type": r_type, "tags": tag_list} for r_type in ("instance", "volume") ] - del template_params['tags'] - if module.params.get('iam_instance_profile'): - template_params['iam_instance_profile'] = determine_iam_role(module, module.params['iam_instance_profile']) + del template_params["tags"] + if module.params.get("iam_instance_profile"): + template_params["iam_instance_profile"] = determine_iam_role(module, module.params["iam_instance_profile"]) params = snake_dict_to_camel_dict( dict((k, v) for k, v in template_params.items() if v is not None), capitalize_first=True, @@ -508,71 +533,82 @@ def params_to_launch_data(module, template_params): def delete_template(module): - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) template, template_versions = existing_templates(module) deleted_versions = [] if template or template_versions: - non_default_versions = [to_text(t['VersionNumber']) for t in template_versions if not t['DefaultVersion']] + non_default_versions = [to_text(t["VersionNumber"]) for t in template_versions if not t["DefaultVersion"]] if non_default_versions: try: v_resp = ec2.delete_launch_template_versions( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], Versions=non_default_versions, aws_retry=True, ) - if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']: - module.warn('Failed to delete template versions {0} on launch template {1}'.format( - v_resp['UnsuccessfullyDeletedLaunchTemplateVersions'], - template['LaunchTemplateId'], - )) - deleted_versions = [camel_dict_to_snake_dict(v) for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']] + if v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"]: + module.warn( + "Failed to delete template versions {0} on launch template {1}".format( + v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"], + template["LaunchTemplateId"], + ) + ) + deleted_versions = [ + camel_dict_to_snake_dict(v) for v in v_resp["SuccessfullyDeletedLaunchTemplateVersions"] + ] except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not delete existing versions of the launch template {0}".format(template['LaunchTemplateId'])) + module.fail_json_aws( + e, + msg="Could not delete existing versions of the launch template {0}".format( + template["LaunchTemplateId"] + ), + ) try: resp = ec2.delete_launch_template( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], aws_retry=True, ) except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId'])) + module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template["LaunchTemplateId"])) return { - 'deleted_versions': deleted_versions, - 'deleted_template': camel_dict_to_snake_dict(resp['LaunchTemplate']), - 'changed': True, + "deleted_versions": deleted_versions, + "deleted_template": camel_dict_to_snake_dict(resp["LaunchTemplate"]), + "changed": True, } else: - return {'changed': False} + return {"changed": False} def create_or_update(module, template_options): - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidLaunchTemplateId.NotFound'])) + ec2 = module.client( + "ec2", retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidLaunchTemplateId.NotFound"]) + ) template, template_versions = existing_templates(module) out = {} lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options)) lt_data = scrub_none_parameters(lt_data, descend_into_lists=True) - if lt_data.get('MetadataOptions'): - if not module.botocore_at_least('1.23.30'): + if lt_data.get("MetadataOptions"): + if not module.botocore_at_least("1.23.30"): # fail only if enabled is requested - if lt_data['MetadataOptions'].get('InstanceMetadataTags') == 'enabled': - module.require_botocore_at_least('1.23.30', reason='to set instance_metadata_tags') + if lt_data["MetadataOptions"].get("InstanceMetadataTags") == "enabled": + module.require_botocore_at_least("1.23.30", reason="to set instance_metadata_tags") # pop if it's not requested to keep backwards compatibility. # otherwise the modules failes because parameters are set due default values - lt_data['MetadataOptions'].pop('InstanceMetadataTags') + lt_data["MetadataOptions"].pop("InstanceMetadataTags") - if not module.botocore_at_least('1.21.29'): + if not module.botocore_at_least("1.21.29"): # fail only if enabled is requested - if lt_data['MetadataOptions'].get('HttpProtocolIpv6') == 'enabled': - module.require_botocore_at_least('1.21.29', reason='to set http_protocol_ipv6') + if lt_data["MetadataOptions"].get("HttpProtocolIpv6") == "enabled": + module.require_botocore_at_least("1.21.29", reason="to set http_protocol_ipv6") # pop if it's not requested to keep backwards compatibility. # otherwise the modules failes because parameters are set due default values - lt_data['MetadataOptions'].pop('HttpProtocolIpv6') + lt_data["MetadataOptions"].pop("HttpProtocolIpv6") if not (template or template_versions): # create a full new one try: resp = ec2.create_launch_template( - LaunchTemplateName=module.params['template_name'], + LaunchTemplateName=module.params["template_name"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, aws_retry=True, @@ -580,26 +616,26 @@ def create_or_update(module, template_options): except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create launch template") template, template_versions = existing_templates(module) - out['changed'] = True + out["changed"] = True elif template and template_versions: most_recent = sorted(template_versions, key=lambda x: x["VersionNumber"])[-1] if lt_data == most_recent["LaunchTemplateData"] and module.params["version_description"] == most_recent.get( "VersionDescription", "" ): - out['changed'] = False + out["changed"] = False return out try: - if module.params.get('source_version') in (None, ''): + if module.params.get("source_version") in (None, ""): resp = ec2.create_launch_template_version( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, VersionDescription=str(module.params["version_description"]), aws_retry=True, ) - elif module.params.get('source_version') == 'latest': + elif module.params.get("source_version") == "latest": resp = ec2.create_launch_template_version( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, SourceVersion=str(most_recent["VersionNumber"]), @@ -608,15 +644,24 @@ def create_or_update(module, template_options): ) else: try: - int(module.params.get('source_version')) + int(module.params.get("source_version")) except ValueError: - module.fail_json(msg='source_version param was not a valid integer, got "{0}"'.format(module.params.get('source_version'))) + module.fail_json( + msg='source_version param was not a valid integer, got "{0}"'.format( + module.params.get("source_version") + ) + ) # get source template version - source_version = next((v for v in template_versions if v['VersionNumber'] == int(module.params.get('source_version'))), None) + source_version = next( + (v for v in template_versions if v["VersionNumber"] == int(module.params.get("source_version"))), + None, + ) if source_version is None: - module.fail_json(msg='source_version does not exist, got "{0}"'.format(module.params.get('source_version'))) + module.fail_json( + msg='source_version does not exist, got "{0}"'.format(module.params.get("source_version")) + ) resp = ec2.create_launch_template_version( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, SourceVersion=str(source_version["VersionNumber"]), @@ -624,31 +669,35 @@ def create_or_update(module, template_options): aws_retry=True, ) - if module.params.get('default_version') in (None, ''): + if module.params.get("default_version") in (None, ""): # no need to do anything, leave the existing version as default pass - elif module.params.get('default_version') == 'latest': + elif module.params.get("default_version") == "latest": set_default = ec2.modify_launch_template( - LaunchTemplateId=template['LaunchTemplateId'], - DefaultVersion=to_text(resp['LaunchTemplateVersion']['VersionNumber']), + LaunchTemplateId=template["LaunchTemplateId"], + DefaultVersion=to_text(resp["LaunchTemplateVersion"]["VersionNumber"]), ClientToken=uuid4().hex, aws_retry=True, ) else: try: - int(module.params.get('default_version')) + int(module.params.get("default_version")) except ValueError: - module.fail_json(msg='default_version param was not a valid integer, got "{0}"'.format(module.params.get('default_version'))) + module.fail_json( + msg='default_version param was not a valid integer, got "{0}"'.format( + module.params.get("default_version") + ) + ) set_default = ec2.modify_launch_template( - LaunchTemplateId=template['LaunchTemplateId'], - DefaultVersion=to_text(int(module.params.get('default_version'))), + LaunchTemplateId=template["LaunchTemplateId"], + DefaultVersion=to_text(int(module.params.get("default_version"))), ClientToken=uuid4().hex, aws_retry=True, ) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create subsequent launch template version") template, template_versions = existing_templates(module) - out['changed'] = True + out["changed"] = True return out @@ -658,43 +707,38 @@ def format_module_output(module): template = camel_dict_to_snake_dict(template) template_versions = [camel_dict_to_snake_dict(v) for v in template_versions] for v in template_versions: - for ts in (v['launch_template_data'].get('tag_specifications') or []): - ts['tags'] = boto3_tag_list_to_ansible_dict(ts.pop('tags')) + for ts in v["launch_template_data"].get("tag_specifications") or []: + ts["tags"] = boto3_tag_list_to_ansible_dict(ts.pop("tags")) output.update(dict(template=template, versions=template_versions)) - output['default_template'] = [ - v for v in template_versions - if v.get('default_version') + output["default_template"] = [v for v in template_versions if v.get("default_version")][0] + output["latest_template"] = [ + v + for v in template_versions + if (v.get("version_number") and int(v["version_number"]) == int(template["latest_version_number"])) ][0] - output['latest_template'] = [ - v for v in template_versions - if ( - v.get('version_number') and - int(v['version_number']) == int(template['latest_version_number']) - ) - ][0] - if "version_number" in output['default_template']: - output['default_version'] = output['default_template']['version_number'] - if "version_number" in output['latest_template']: - output['latest_version'] = output['latest_template']['version_number'] + if "version_number" in output["default_template"]: + output["default_version"] = output["default_template"]["version_number"] + if "version_number" in output["latest_template"]: + output["latest_version"] = output["latest_template"]["version_number"] return output def main(): template_options = dict( block_device_mappings=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( device_name=dict(), ebs=dict( - type='dict', + type="dict", options=dict( - delete_on_termination=dict(type='bool'), - encrypted=dict(type='bool'), - iops=dict(type='int'), + delete_on_termination=dict(type="bool"), + encrypted=dict(type="bool"), + iops=dict(type="int"), kms_key_id=dict(), snapshot_id=dict(), - volume_size=dict(type='int'), + volume_size=dict(type="int"), volume_type=dict(), ), ), @@ -703,39 +747,39 @@ def main(): ), ), cpu_options=dict( - type='dict', + type="dict", options=dict( - core_count=dict(type='int'), - threads_per_core=dict(type='int'), + core_count=dict(type="int"), + threads_per_core=dict(type="int"), ), ), credit_specification=dict( - dict(type='dict'), + dict(type="dict"), options=dict( cpu_credits=dict(), ), ), - disable_api_termination=dict(type='bool'), - ebs_optimized=dict(type='bool'), + disable_api_termination=dict(type="bool"), + ebs_optimized=dict(type="bool"), elastic_gpu_specifications=dict( options=dict(type=dict()), - type='list', - elements='dict', + type="list", + elements="dict", ), iam_instance_profile=dict(), image_id=dict(), - instance_initiated_shutdown_behavior=dict(choices=['stop', 'terminate']), + instance_initiated_shutdown_behavior=dict(choices=["stop", "terminate"]), instance_market_options=dict( - type='dict', + type="dict", options=dict( market_type=dict(), spot_options=dict( - type='dict', + type="dict", options=dict( - block_duration_minutes=dict(type='int'), - instance_interruption_behavior=dict(choices=['hibernate', 'stop', 'terminate']), + block_duration_minutes=dict(type="int"), + instance_interruption_behavior=dict(choices=["hibernate", "stop", "terminate"]), max_price=dict(), - spot_instance_type=dict(choices=['one-time', 'persistent']), + spot_instance_type=dict(choices=["one-time", "persistent"]), ), ), ), @@ -744,32 +788,30 @@ def main(): kernel_id=dict(), key_name=dict(), monitoring=dict( - type='dict', - options=dict( - enabled=dict(type='bool') - ), + type="dict", + options=dict(enabled=dict(type="bool")), ), metadata_options=dict( - type='dict', + type="dict", options=dict( - http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'), - http_put_response_hop_limit=dict(type='int', default=1), - http_tokens=dict(choices=['optional', 'required'], default='optional'), - http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'), - instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'), - ) + http_endpoint=dict(choices=["enabled", "disabled"], default="enabled"), + http_put_response_hop_limit=dict(type="int", default=1), + http_tokens=dict(choices=["optional", "required"], default="optional"), + http_protocol_ipv6=dict(choices=["disabled", "enabled"], default="disabled"), + instance_metadata_tags=dict(choices=["disabled", "enabled"], default="disabled"), + ), ), network_interfaces=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( - associate_public_ip_address=dict(type='bool'), - delete_on_termination=dict(type='bool'), + associate_public_ip_address=dict(type="bool"), + delete_on_termination=dict(type="bool"), description=dict(), - device_index=dict(type='int'), - groups=dict(type='list', elements='str'), - ipv6_address_count=dict(type='int'), - ipv6_addresses=dict(type='list', elements='str'), + device_index=dict(type="int"), + groups=dict(type="list", elements="str"), + ipv6_address_count=dict(type="int"), + ipv6_addresses=dict(type="list", elements="str"), network_interface_id=dict(), private_ip_address=dict(), subnet_id=dict(), @@ -783,12 +825,12 @@ def main(): host_id=dict(), tenancy=dict(), ), - type='dict', + type="dict", ), ram_disk_id=dict(), - security_group_ids=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - tags=dict(type='dict', aliases=['resource_tags']), + security_group_ids=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + tags=dict(type="dict", aliases=["resource_tags"]), user_data=dict(), ) @@ -806,25 +848,25 @@ def main(): module = AnsibleAWSModule( argument_spec=arg_spec, required_one_of=[ - ('template_name', 'template_id') + ("template_name", "template_id"), ], - supports_check_mode=True + supports_check_mode=True, ) - for interface in (module.params.get('network_interfaces') or []): - if interface.get('ipv6_addresses'): - interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']] + for interface in module.params.get("network_interfaces") or []: + if interface.get("ipv6_addresses"): + interface["ipv6_addresses"] = [{"ipv6_address": x} for x in interface["ipv6_addresses"]] - if module.params.get('state') == 'present': + if module.params.get("state") == "present": out = create_or_update(module, template_options) out.update(format_module_output(module)) - elif module.params.get('state') == 'absent': + elif module.params.get("state") == "absent": out = delete_template(module) else: - module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get('state'))) + module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get("state"))) module.exit_json(**out) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_placement_group.py b/plugins/modules/ec2_placement_group.py index 8687ded59d1..4e1967c846d 100644 --- a/plugins/modules/ec2_placement_group.py +++ b/plugins/modules/ec2_placement_group.py @@ -118,40 +118,32 @@ def search_placement_group(connection, module): """ name = module.params.get("name") try: - response = connection.describe_placement_groups( - Filters=[{ - "Name": "group-name", - "Values": [name] - }]) + response = connection.describe_placement_groups(Filters=[{"Name": "group-name", "Values": [name]}]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - msg="Couldn't find placement group named [%s]" % name) + module.fail_json_aws(e, msg="Couldn't find placement group named [%s]" % name) - if len(response['PlacementGroups']) != 1: + if len(response["PlacementGroups"]) != 1: return None else: - placement_group = response['PlacementGroups'][0] + placement_group = response["PlacementGroups"][0] return { - "name": placement_group['GroupName'], - "state": placement_group['State'], - "strategy": placement_group['Strategy'], + "name": placement_group["GroupName"], + "state": placement_group["State"], + "strategy": placement_group["Strategy"], } -@AWSRetry.exponential_backoff(catch_extra_error_codes=['InvalidPlacementGroup.Unknown']) +@AWSRetry.exponential_backoff(catch_extra_error_codes=["InvalidPlacementGroup.Unknown"]) def get_placement_group_information(connection, name): """ Retrieve information about a placement group. """ - response = connection.describe_placement_groups( - GroupNames=[name] - ) - placement_group = response['PlacementGroups'][0] + response = connection.describe_placement_groups(GroupNames=[name]) + placement_group = response["PlacementGroups"][0] return { - "name": placement_group['GroupName'], - "state": placement_group['State'], - "strategy": placement_group['Strategy'], + "name": placement_group["GroupName"], + "state": placement_group["State"], + "strategy": placement_group["Strategy"], } @@ -161,32 +153,34 @@ def create_placement_group(connection, module): strategy = module.params.get("strategy") partition_count = module.params.get("partition_count") - if strategy != 'partition' and partition_count: - module.fail_json( - msg="'partition_count' can only be set when strategy is set to 'partition'.") + if strategy != "partition" and partition_count: + module.fail_json(msg="'partition_count' can only be set when strategy is set to 'partition'.") params = {} - params['GroupName'] = name - params['Strategy'] = strategy + params["GroupName"] = name + params["Strategy"] = strategy if partition_count: - params['PartitionCount'] = partition_count - params['DryRun'] = module.check_mode + params["PartitionCount"] = partition_count + params["DryRun"] = module.check_mode try: connection.create_placement_group(**params) - except is_boto3_error_code('DryRunOperation'): - module.exit_json(changed=True, placement_group={ - "name": name, - "state": 'DryRun', - "strategy": strategy, - }) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, - msg="Couldn't create placement group [%s]" % name) - - module.exit_json(changed=True, - placement_group=get_placement_group_information(connection, name)) + except is_boto3_error_code("DryRunOperation"): + module.exit_json( + changed=True, + placement_group={ + "name": name, + "state": "DryRun", + "strategy": strategy, + }, + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't create placement group [%s]" % name) + + module.exit_json(changed=True, placement_group=get_placement_group_information(connection, name)) @AWSRetry.exponential_backoff() @@ -194,52 +188,44 @@ def delete_placement_group(connection, module): name = module.params.get("name") try: - connection.delete_placement_group( - GroupName=name, DryRun=module.check_mode) + connection.delete_placement_group(GroupName=name, DryRun=module.check_mode) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - msg="Couldn't delete placement group [%s]" % name) + module.fail_json_aws(e, msg="Couldn't delete placement group [%s]" % name) module.exit_json(changed=True) def main(): argument_spec = dict( - name=dict(required=True, type='str'), - partition_count=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - strategy=dict(default='cluster', choices=['cluster', 'spread', 'partition']) + name=dict(required=True, type="str"), + partition_count=dict(type="int"), + state=dict(default="present", choices=["present", "absent"]), + strategy=dict(default="cluster", choices=["cluster", "spread", "partition"]), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2') + connection = module.client("ec2") state = module.params.get("state") - if state == 'present': + if state == "present": placement_group = search_placement_group(connection, module) if placement_group is None: create_placement_group(connection, module) else: strategy = module.params.get("strategy") - if placement_group['strategy'] == strategy: - module.exit_json( - changed=False, placement_group=placement_group) + if placement_group["strategy"] == strategy: + module.exit_json(changed=False, placement_group=placement_group) else: name = module.params.get("name") module.fail_json( - msg=("Placement group '{}' exists, can't change strategy" + - " from '{}' to '{}'").format( - name, - placement_group['strategy'], - strategy)) + msg=("Placement group '{}' exists, can't change strategy" + " from '{}' to '{}'").format( + name, placement_group["strategy"], strategy + ) + ) - elif state == 'absent': + elif state == "absent": placement_group = search_placement_group(connection, module) if placement_group is None: module.exit_json(changed=False) @@ -247,5 +233,5 @@ def main(): delete_placement_group(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_placement_group_info.py b/plugins/modules/ec2_placement_group_info.py index bc9d717e49d..970cd302636 100644 --- a/plugins/modules/ec2_placement_group_info.py +++ b/plugins/modules/ec2_placement_group_info.py @@ -85,42 +85,45 @@ def get_placement_groups_details(connection, module): try: if len(names) > 0: response = connection.describe_placement_groups( - Filters=[{ - "Name": "group-name", - "Values": names - }]) + Filters=[ + { + "Name": "group-name", + "Values": names, + } + ] + ) else: response = connection.describe_placement_groups() except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, - msg="Couldn't find placement groups named [%s]" % names) + module.fail_json_aws(e, msg="Couldn't find placement groups named [%s]" % names) results = [] - for placement_group in response['PlacementGroups']: - results.append({ - "name": placement_group['GroupName'], - "state": placement_group['State'], - "strategy": placement_group['Strategy'], - }) + for placement_group in response["PlacementGroups"]: + results.append( + { + "name": placement_group["GroupName"], + "state": placement_group["State"], + "strategy": placement_group["Strategy"], + } + ) return results def main(): argument_spec = dict( - names=dict(type='list', default=[], elements='str') + names=dict(type="list", default=[], elements="str"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) - connection = module.client('ec2') + connection = module.client("ec2") placement_groups = get_placement_groups_details(connection, module) module.exit_json(changed=False, placement_groups=placement_groups) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_snapshot_copy.py b/plugins/modules/ec2_snapshot_copy.py index 59d0582c048..ce73191cb79 100644 --- a/plugins/modules/ec2_snapshot_copy.py +++ b/plugins/modules/ec2_snapshot_copy.py @@ -126,34 +126,33 @@ def copy_snapshot(module, ec2): """ params = { - 'SourceRegion': module.params.get('source_region'), - 'SourceSnapshotId': module.params.get('source_snapshot_id'), - 'Description': module.params.get('description') + "SourceRegion": module.params.get("source_region"), + "SourceSnapshotId": module.params.get("source_snapshot_id"), + "Description": module.params.get("description"), } - if module.params.get('encrypted'): - params['Encrypted'] = True + if module.params.get("encrypted"): + params["Encrypted"] = True - if module.params.get('kms_key_id'): - params['KmsKeyId'] = module.params.get('kms_key_id') + if module.params.get("kms_key_id"): + params["KmsKeyId"] = module.params.get("kms_key_id") - if module.params.get('tags'): - params['TagSpecifications'] = boto3_tag_specifications(module.params.get('tags'), types=['snapshot']) + if module.params.get("tags"): + params["TagSpecifications"] = boto3_tag_specifications(module.params.get("tags"), types=["snapshot"]) try: - snapshot_id = ec2.copy_snapshot(**params)['SnapshotId'] - if module.params.get('wait'): + snapshot_id = ec2.copy_snapshot(**params)["SnapshotId"] + if module.params.get("wait"): delay = 15 # Add one to max_attempts as wait() increment # its counter before assessing it for time.sleep() - max_attempts = (module.params.get('wait_timeout') // delay) + 1 - ec2.get_waiter('snapshot_completed').wait( - SnapshotIds=[snapshot_id], - WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts) + max_attempts = (module.params.get("wait_timeout") // delay) + 1 + ec2.get_waiter("snapshot_completed").wait( + SnapshotIds=[snapshot_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts) ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='An error occurred waiting for the snapshot to become available.') + module.fail_json_aws(e, msg="An error occurred waiting for the snapshot to become available.") module.exit_json(changed=True, snapshot_id=snapshot_id) @@ -162,23 +161,23 @@ def main(): argument_spec = dict( source_region=dict(required=True), source_snapshot_id=dict(required=True), - description=dict(default=''), - encrypted=dict(type='bool', default=False, required=False), - kms_key_id=dict(type='str', required=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=600), - tags=dict(type='dict', aliases=['resource_tags']), + description=dict(default=""), + encrypted=dict(type="bool", default=False, required=False), + kms_key_id=dict(type="str", required=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=600), + tags=dict(type="dict", aliases=["resource_tags"]), ) module = AnsibleAWSModule(argument_spec=argument_spec) try: - client = module.client('ec2') + client = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") copy_snapshot(module, client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_transit_gateway.py b/plugins/modules/ec2_transit_gateway.py index 832d16defc9..8c6282d0b0f 100644 --- a/plugins/modules/ec2_transit_gateway.py +++ b/plugins/modules/ec2_transit_gateway.py @@ -230,32 +230,31 @@ class AnsibleEc2Tgw(object): - def __init__(self, module, results): self._module = module self._results = results retry_decorator = AWSRetry.jittered_backoff( - catch_extra_error_codes=['IncorrectState'], + catch_extra_error_codes=["IncorrectState"], ) - connection = module.client('ec2', retry_decorator=retry_decorator) + connection = module.client("ec2", retry_decorator=retry_decorator) self._connection = connection self._check_mode = self._module.check_mode def process(self): - """ Process the request based on state parameter . - state = present will search for an existing tgw based and return the object data. - if no object is found it will be created - - state = absent will attempt to remove the tgw however will fail if it still has - attachments or associations - """ - description = self._module.params.get('description') - state = self._module.params.get('state', 'present') - tgw_id = self._module.params.get('transit_gateway_id') - - if state == 'present': + """Process the request based on state parameter . + state = present will search for an existing tgw based and return the object data. + if no object is found it will be created + + state = absent will attempt to remove the tgw however will fail if it still has + attachments or associations + """ + description = self._module.params.get("description") + state = self._module.params.get("state", "present") + tgw_id = self._module.params.get("transit_gateway_id") + + if state == "present": self.ensure_tgw_present(tgw_id, description) - elif state == 'absent': + elif state == "absent": self.ensure_tgw_absent(tgw_id, description) def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True): @@ -279,13 +278,13 @@ def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True): if transit_gateway: if self._check_mode: - transit_gateway['state'] = status + transit_gateway["state"] = status - if transit_gateway.get('state') == status: + if transit_gateway.get("state") == status: status_achieved = True break - elif transit_gateway.get('state') == 'failed': + elif transit_gateway.get("state") == "failed": break else: @@ -295,13 +294,12 @@ def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True): self._module.fail_json_aws(e) if not status_achieved: - self._module.fail_json( - msg="Wait time out reached, while waiting for results") + self._module.fail_json(msg="Wait time out reached, while waiting for results") return transit_gateway def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True): - """ search for an existing tgw by either tgw_id or description + """search for an existing tgw by either tgw_id or description :param tgw_id: The AWS id of the transit gateway :param description: The description of the transit gateway. :param skip_deleted: ignore deleted transit gateways @@ -309,7 +307,7 @@ def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True): """ filters = [] if tgw_id: - filters = ansible_dict_to_boto3_filter_list({'transit-gateway-id': tgw_id}) + filters = ansible_dict_to_boto3_filter_list({"transit-gateway-id": tgw_id}) try: response = AWSRetry.exponential_backoff()(self._connection.describe_transit_gateways)(Filters=filters) @@ -319,20 +317,21 @@ def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True): tgw = None tgws = [] - if len(response.get('TransitGateways', [])) == 1 and tgw_id: - if (response['TransitGateways'][0]['State'] != 'deleted') or not skip_deleted: - tgws.extend(response['TransitGateways']) + if len(response.get("TransitGateways", [])) == 1 and tgw_id: + if (response["TransitGateways"][0]["State"] != "deleted") or not skip_deleted: + tgws.extend(response["TransitGateways"]) - for gateway in response.get('TransitGateways', []): - if description == gateway['Description'] and gateway['State'] != 'deleted': + for gateway in response.get("TransitGateways", []): + if description == gateway["Description"] and gateway["State"] != "deleted": tgws.append(gateway) if len(tgws) > 1: self._module.fail_json( - msg='EC2 returned more than one transit Gateway for description {0}, aborting'.format(description)) + msg="EC2 returned more than one transit Gateway for description {0}, aborting".format(description) + ) elif tgws: - tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=['Tags']) - tgw['tags'] = boto3_tag_list_to_ansible_dict(tgws[0]['Tags']) + tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=["Tags"]) + tgw["tags"] = boto3_tag_list_to_ansible_dict(tgws[0]["Tags"]) return tgw @@ -352,31 +351,31 @@ def create_tgw(self, description): :return dict: transit gateway object """ options = dict() - wait = self._module.params.get('wait') - wait_timeout = self._module.params.get('wait_timeout') + wait = self._module.params.get("wait") + wait_timeout = self._module.params.get("wait_timeout") - if self._module.params.get('asn'): - options['AmazonSideAsn'] = self._module.params.get('asn') + if self._module.params.get("asn"): + options["AmazonSideAsn"] = self._module.params.get("asn") - options['AutoAcceptSharedAttachments'] = self.enable_option_flag(self._module.params.get('auto_attach')) - options['DefaultRouteTableAssociation'] = self.enable_option_flag(self._module.params.get('auto_associate')) - options['DefaultRouteTablePropagation'] = self.enable_option_flag(self._module.params.get('auto_propagate')) - options['VpnEcmpSupport'] = self.enable_option_flag(self._module.params.get('vpn_ecmp_support')) - options['DnsSupport'] = self.enable_option_flag(self._module.params.get('dns_support')) + options["AutoAcceptSharedAttachments"] = self.enable_option_flag(self._module.params.get("auto_attach")) + options["DefaultRouteTableAssociation"] = self.enable_option_flag(self._module.params.get("auto_associate")) + options["DefaultRouteTablePropagation"] = self.enable_option_flag(self._module.params.get("auto_propagate")) + options["VpnEcmpSupport"] = self.enable_option_flag(self._module.params.get("vpn_ecmp_support")) + options["DnsSupport"] = self.enable_option_flag(self._module.params.get("dns_support")) try: response = self._connection.create_transit_gateway(Description=description, Options=options) except (ClientError, BotoCoreError) as e: self._module.fail_json_aws(e) - tgw_id = response['TransitGateway']['TransitGatewayId'] + tgw_id = response["TransitGateway"]["TransitGatewayId"] if wait: result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="available") else: result = self.get_matching_tgw(tgw_id=tgw_id) - self._results['msg'] = (' Transit gateway {0} created'.format(result['transit_gateway_id'])) + self._results["msg"] = " Transit gateway {0} created".format(result["transit_gateway_id"]) return result @@ -387,8 +386,8 @@ def delete_tgw(self, tgw_id): :param tgw_id: The id of the transit gateway :return dict: transit gateway object """ - wait = self._module.params.get('wait') - wait_timeout = self._module.params.get('wait_timeout') + wait = self._module.params.get("wait") + wait_timeout = self._module.params.get("wait_timeout") try: response = self._connection.delete_transit_gateway(TransitGatewayId=tgw_id) @@ -396,11 +395,13 @@ def delete_tgw(self, tgw_id): self._module.fail_json_aws(e) if wait: - result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False) + result = self.wait_for_status( + wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False + ) else: result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False) - self._results['msg'] = (' Transit gateway {0} deleted'.format(tgw_id)) + self._results["msg"] = " Transit gateway {0} deleted".format(tgw_id) return result @@ -417,25 +418,27 @@ def ensure_tgw_present(self, tgw_id=None, description=None): if tgw is None: if self._check_mode: - self._results['changed'] = True - self._results['transit_gateway_id'] = None + self._results["changed"] = True + self._results["transit_gateway_id"] = None return self._results try: if not description: self._module.fail_json(msg="Failed to create Transit Gateway: description argument required") tgw = self.create_tgw(description) - self._results['changed'] = True + self._results["changed"] = True except (BotoCoreError, ClientError) as e: - self._module.fail_json_aws(e, msg='Unable to create Transit Gateway') - - self._results['changed'] |= ensure_ec2_tags( - self._connection, self._module, tgw['transit_gateway_id'], - tags=self._module.params.get('tags'), - purge_tags=self._module.params.get('purge_tags'), + self._module.fail_json_aws(e, msg="Unable to create Transit Gateway") + + self._results["changed"] |= ensure_ec2_tags( + self._connection, + self._module, + tgw["transit_gateway_id"], + tags=self._module.params.get("tags"), + purge_tags=self._module.params.get("purge_tags"), ) - self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id']) + self._results["transit_gateway"] = self.get_matching_tgw(tgw_id=tgw["transit_gateway_id"]) return self._results @@ -447,21 +450,22 @@ def ensure_tgw_absent(self, tgw_id=None, description=None): :param description: The description of the transit gateway. :return doct: transit gateway object """ - self._results['transit_gateway_id'] = None + self._results["transit_gateway_id"] = None tgw = self.get_matching_tgw(tgw_id, description) if tgw is not None: if self._check_mode: - self._results['changed'] = True + self._results["changed"] = True return self._results try: - tgw = self.delete_tgw(tgw_id=tgw['transit_gateway_id']) - self._results['changed'] = True - self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'], - skip_deleted=False) + tgw = self.delete_tgw(tgw_id=tgw["transit_gateway_id"]) + self._results["changed"] = True + self._results["transit_gateway"] = self.get_matching_tgw( + tgw_id=tgw["transit_gateway_id"], skip_deleted=False + ) except (BotoCoreError, ClientError) as e: - self._module.fail_json_aws(e, msg='Unable to delete Transit Gateway') + self._module.fail_json_aws(e, msg="Unable to delete Transit Gateway") return self._results @@ -473,24 +477,24 @@ def setup_module_object(): """ argument_spec = dict( - asn=dict(type='int'), - auto_associate=dict(type='bool', default=True), - auto_attach=dict(type='bool', default=False), - auto_propagate=dict(type='bool', default=True), - description=dict(type='str'), - dns_support=dict(type='bool', default=True), - purge_tags=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='dict', aliases=['resource_tags']), - transit_gateway_id=dict(type='str'), - vpn_ecmp_support=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300) + asn=dict(type="int"), + auto_associate=dict(type="bool", default=True), + auto_attach=dict(type="bool", default=False), + auto_propagate=dict(type="bool", default=True), + description=dict(type="str"), + dns_support=dict(type="bool", default=True), + purge_tags=dict(type="bool", default=True), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="dict", aliases=["resource_tags"]), + transit_gateway_id=dict(type="str"), + vpn_ecmp_support=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=300), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_one_of=[('description', 'transit_gateway_id')], + required_one_of=[("description", "transit_gateway_id")], supports_check_mode=True, ) @@ -498,12 +502,9 @@ def setup_module_object(): def main(): - module = setup_module_object() - results = dict( - changed=False - ) + results = dict(changed=False) tgw_manager = AnsibleEc2Tgw(module=module, results=results) tgw_manager.process() @@ -511,5 +512,5 @@ def main(): module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_transit_gateway_info.py b/plugins/modules/ec2_transit_gateway_info.py index 5053c8d65d8..b25346b84b8 100644 --- a/plugins/modules/ec2_transit_gateway_info.py +++ b/plugins/modules/ec2_transit_gateway_info.py @@ -177,11 +177,10 @@ class AnsibleEc2TgwInfo(object): - def __init__(self, module, results): self._module = module self._results = results - self._connection = self._module.client('ec2') + self._connection = self._module.client("ec2") self._check_mode = self._module.check_mode @AWSRetry.exponential_backoff() @@ -193,8 +192,8 @@ def describe_transit_gateways(self): connection : boto3 client connection object """ # collect parameters - filters = ansible_dict_to_boto3_filter_list(self._module.params['filters']) - transit_gateway_ids = self._module.params['transit_gateway_ids'] + filters = ansible_dict_to_boto3_filter_list(self._module.params["filters"]) + transit_gateway_ids = self._module.params["transit_gateway_ids"] # init empty list for return vars transit_gateway_info = list() @@ -202,17 +201,18 @@ def describe_transit_gateways(self): # Get the basic transit gateway info try: response = self._connection.describe_transit_gateways( - TransitGatewayIds=transit_gateway_ids, Filters=filters) - except is_boto3_error_code('InvalidTransitGatewayID.NotFound'): - self._results['transit_gateways'] = [] + TransitGatewayIds=transit_gateway_ids, Filters=filters + ) + except is_boto3_error_code("InvalidTransitGatewayID.NotFound"): + self._results["transit_gateways"] = [] return - for transit_gateway in response['TransitGateways']: - transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=['Tags'])) + for transit_gateway in response["TransitGateways"]: + transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=["Tags"])) # convert tag list to ansible dict - transit_gateway_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(transit_gateway.get('Tags', [])) + transit_gateway_info[-1]["tags"] = boto3_tag_list_to_ansible_dict(transit_gateway.get("Tags", [])) - self._results['transit_gateways'] = transit_gateway_info + self._results["transit_gateways"] = transit_gateway_info return @@ -223,8 +223,8 @@ def setup_module_object(): """ argument_spec = dict( - transit_gateway_ids=dict(type='list', default=[], elements='str', aliases=['transit_gateway_id']), - filters=dict(type='dict', default={}) + transit_gateway_ids=dict(type="list", default=[], elements="str", aliases=["transit_gateway_id"]), + filters=dict(type="dict", default={}), ) module = AnsibleAWSModule( @@ -236,12 +236,9 @@ def setup_module_object(): def main(): - module = setup_module_object() - results = dict( - changed=False - ) + results = dict(changed=False) tgwf_manager = AnsibleEc2TgwInfo(module=module, results=results) try: @@ -252,5 +249,5 @@ def main(): module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_transit_gateway_vpc_attachment.py b/plugins/modules/ec2_transit_gateway_vpc_attachment.py index 2878fbf9129..301fefb0513 100644 --- a/plugins/modules/ec2_transit_gateway_vpc_attachment.py +++ b/plugins/modules/ec2_transit_gateway_vpc_attachment.py @@ -221,25 +221,24 @@ def main(): - argument_spec = dict( - state=dict(type='str', required=False, default='present', choices=['absent', 'present']), - transit_gateway=dict(type='str', required=False, aliases=['transit_gateway_id']), - id=dict(type='str', required=False, aliases=['attachment_id']), - name=dict(type='str', required=False), - subnets=dict(type='list', elements='str', required=False), - purge_subnets=dict(type='bool', required=False, default=True), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - appliance_mode_support=dict(type='bool', required=False), - dns_support=dict(type='bool', required=False), - ipv6_support=dict(type='bool', required=False), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), + state=dict(type="str", required=False, default="present", choices=["absent", "present"]), + transit_gateway=dict(type="str", required=False, aliases=["transit_gateway_id"]), + id=dict(type="str", required=False, aliases=["attachment_id"]), + name=dict(type="str", required=False), + subnets=dict(type="list", elements="str", required=False), + purge_subnets=dict(type="bool", required=False, default=True), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + appliance_mode_support=dict(type="bool", required=False), + dns_support=dict(type="bool", required=False), + ipv6_support=dict(type="bool", required=False), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), ) one_of = [ - ['id', 'transit_gateway', 'name'], + ["id", "transit_gateway", "name"], ] module = AnsibleAWSModule( @@ -248,55 +247,68 @@ def main(): required_one_of=one_of, ) - attach_id = module.params.get('id', None) - tgw = module.params.get('transit_gateway', None) - name = module.params.get('name', None) - tags = module.params.get('tags', None) - purge_tags = module.params.get('purge_tags') - state = module.params.get('state') - subnets = module.params.get('subnets', None) - purge_subnets = module.params.get('purge_subnets') + attach_id = module.params.get("id", None) + tgw = module.params.get("transit_gateway", None) + name = module.params.get("name", None) + tags = module.params.get("tags", None) + purge_tags = module.params.get("purge_tags") + state = module.params.get("state") + subnets = module.params.get("subnets", None) + purge_subnets = module.params.get("purge_subnets") # When not provided with an ID see if one exists. if not attach_id: search_manager = TransitGatewayVpcAttachmentManager(module=module) filters = dict() if tgw: - filters['transit-gateway-id'] = tgw + filters["transit-gateway-id"] = tgw if name: - filters['tag:Name'] = name + filters["tag:Name"] = name if subnets: vpc_id = search_manager.subnets_to_vpc(subnets) - filters['vpc-id'] = vpc_id + filters["vpc-id"] = vpc_id # Attachments lurk in a 'deleted' state, for a while, ignore them so we # can reuse the names - filters['state'] = [ - 'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying', - 'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting' + filters["state"] = [ + "available", + "deleting", + "failed", + "failing", + "initiatingRequest", + "modifying", + "pendingAcceptance", + "pending", + "rollingBack", + "rejected", + "rejecting", ] attachments = search_manager.list(filters=filters) if len(attachments) > 1: - module.fail_json('Multiple matching attachments found, provide an ID', attachments=attachments) + module.fail_json("Multiple matching attachments found, provide an ID", attachments=attachments) # If we find a match then we'll modify it by ID, otherwise we'll be # creating a new RTB. if attachments: - attach_id = attachments[0]['transit_gateway_attachment_id'] + attach_id = attachments[0]["transit_gateway_attachment_id"] manager = TransitGatewayVpcAttachmentManager(module=module, id=attach_id) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - if state == 'absent': + if state == "absent": manager.delete() else: if not attach_id: if not tgw: - module.fail_json('No existing attachment found. To create a new attachment' - ' the `transit_gateway` parameter must be provided.') + module.fail_json( + "No existing attachment found. To create a new attachment" + " the `transit_gateway` parameter must be provided." + ) if not subnets: - module.fail_json('No existing attachment found. To create a new attachment' - ' the `subnets` parameter must be provided.') + module.fail_json( + "No existing attachment found. To create a new attachment" + " the `subnets` parameter must be provided." + ) # name is just a special case of tags. if name: @@ -310,9 +322,9 @@ def main(): manager.set_transit_gateway(tgw) manager.set_subnets(subnets, purge_subnets) manager.set_tags(tags, purge_tags) - manager.set_dns_support(module.params.get('dns_support', None)) - manager.set_ipv6_support(module.params.get('ipv6_support', None)) - manager.set_appliance_mode_support(module.params.get('appliance_mode_support', None)) + manager.set_dns_support(module.params.get("dns_support", None)) + manager.set_ipv6_support(module.params.get("ipv6_support", None)) + manager.set_appliance_mode_support(module.params.get("appliance_mode_support", None)) manager.flush_changes() results = dict( @@ -320,7 +332,7 @@ def main(): attachments=[manager.updated_resource], ) if manager.changed: - results['diff'] = dict( + results["diff"] = dict( before=manager.original_resource, after=manager.updated_resource, ) @@ -328,5 +340,5 @@ def main(): module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py b/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py index 49c03ff432c..a665e4080cc 100644 --- a/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py +++ b/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py @@ -147,17 +147,16 @@ def main(): - argument_spec = dict( - id=dict(type='str', required=False, aliases=['attachment_id']), - name=dict(type='str', required=False), - filters=dict(type='dict', required=False), - include_deleted=dict(type='bool', required=False, default=False) + id=dict(type="str", required=False, aliases=["attachment_id"]), + name=dict(type="str", required=False), + filters=dict(type="dict", required=False), + include_deleted=dict(type="bool", required=False, default=False), ) mutually_exclusive = [ - ['id', 'name'], - ['id', 'filters'], + ["id", "name"], + ["id", "filters"], ] module = AnsibleAWSModule( @@ -165,22 +164,31 @@ def main(): supports_check_mode=True, ) - name = module.params.get('name', None) - id = module.params.get('id', None) - opt_filters = module.params.get('filters', None) + name = module.params.get("name", None) + id = module.params.get("id", None) + opt_filters = module.params.get("filters", None) search_manager = TransitGatewayVpcAttachmentManager(module=module) filters = dict() if name: - filters['tag:Name'] = name + filters["tag:Name"] = name - if not module.params.get('include_deleted'): + if not module.params.get("include_deleted"): # Attachments lurk in a 'deleted' state, for a while, ignore them so we # can reuse the names - filters['state'] = [ - 'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying', - 'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting' + filters["state"] = [ + "available", + "deleting", + "failed", + "failing", + "initiatingRequest", + "modifying", + "pendingAcceptance", + "pending", + "rollingBack", + "rejected", + "rejecting", ] if opt_filters: @@ -191,5 +199,5 @@ def main(): module.exit_json(changed=False, attachments=attachments, filters=filters) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_vpc_egress_igw.py b/plugins/modules/ec2_vpc_egress_igw.py index b6fb0b837f1..b15bec20f06 100644 --- a/plugins/modules/ec2_vpc_egress_igw.py +++ b/plugins/modules/ec2_vpc_egress_igw.py @@ -81,16 +81,20 @@ def delete_eigw(module, connection, eigw_id): try: response = connection.delete_egress_only_internet_gateway( - aws_retry=True, - DryRun=module.check_mode, - EgressOnlyInternetGatewayId=eigw_id) - except is_boto3_error_code('DryRunOperation'): + aws_retry=True, DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id + ) + except is_boto3_error_code("DryRunOperation"): changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id) + ) if not module.check_mode: - changed = response.get('ReturnCode', False) + changed = response.get("ReturnCode", False) return changed @@ -108,29 +112,35 @@ def create_eigw(module, connection, vpc_id): try: response = connection.create_egress_only_internet_gateway( - aws_retry=True, - DryRun=module.check_mode, - VpcId=vpc_id) - except is_boto3_error_code('DryRunOperation'): + aws_retry=True, DryRun=module.check_mode, VpcId=vpc_id + ) + except is_boto3_error_code("DryRunOperation"): # When boto3 method is run with DryRun=True it returns an error on success # We need to catch the error and return something valid changed = True - except is_boto3_error_code('InvalidVpcID.NotFound') as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidVpcID.NotFound") as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id)) if not module.check_mode: - gateway = response.get('EgressOnlyInternetGateway', {}) - state = gateway.get('Attachments', [{}])[0].get('State') - gateway_id = gateway.get('EgressOnlyInternetGatewayId') + gateway = response.get("EgressOnlyInternetGateway", {}) + state = gateway.get("Attachments", [{}])[0].get("State") + gateway_id = gateway.get("EgressOnlyInternetGatewayId") - if gateway_id and state in ('attached', 'attaching'): + if gateway_id and state in ("attached", "attaching"): changed = True else: # EIGW gave back a bad attachment state or an invalid response so we error out - module.fail_json(msg='Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response'.format(vpc_id), - **camel_dict_to_snake_dict(response)) + module.fail_json( + msg="Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response".format( + vpc_id + ), + **camel_dict_to_snake_dict(response), + ) return changed, gateway_id @@ -146,45 +156,41 @@ def describe_eigws(module, connection, vpc_id): gateway_id = None try: - response = connection.describe_egress_only_internet_gateways( - aws_retry=True) + response = connection.describe_egress_only_internet_gateways(aws_retry=True) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Could not get list of existing Egress-Only Internet Gateways") - for eigw in response.get('EgressOnlyInternetGateways', []): - for attachment in eigw.get('Attachments', []): - if attachment.get('VpcId') == vpc_id and attachment.get('State') in ('attached', 'attaching'): - gateway_id = eigw.get('EgressOnlyInternetGatewayId') + for eigw in response.get("EgressOnlyInternetGateways", []): + for attachment in eigw.get("Attachments", []): + if attachment.get("VpcId") == vpc_id and attachment.get("State") in ("attached", "attaching"): + gateway_id = eigw.get("EgressOnlyInternetGatewayId") return gateway_id def main(): - argument_spec = dict( - vpc_id=dict(required=True), - state=dict(default='present', choices=['present', 'absent']) - ) + argument_spec = dict(vpc_id=dict(required=True), state=dict(default="present", choices=["present", "absent"])) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection = module.client('ec2', retry_decorator=retry_decorator) + connection = module.client("ec2", retry_decorator=retry_decorator) - vpc_id = module.params.get('vpc_id') - state = module.params.get('state') + vpc_id = module.params.get("vpc_id") + state = module.params.get("state") eigw_id = describe_eigws(module, connection, vpc_id) result = dict(gateway_id=eigw_id, vpc_id=vpc_id) changed = False - if state == 'present' and not eigw_id: - changed, result['gateway_id'] = create_eigw(module, connection, vpc_id) - elif state == 'absent' and eigw_id: + if state == "present" and not eigw_id: + changed, result["gateway_id"] = create_eigw(module, connection, vpc_id) + elif state == "absent" and eigw_id: changed = delete_eigw(module, connection, eigw_id) module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_vpc_nacl.py b/plugins/modules/ec2_vpc_nacl.py index fa34ccd8118..022f058d0f9 100644 --- a/plugins/modules/ec2_vpc_nacl.py +++ b/plugins/modules/ec2_vpc_nacl.py @@ -167,33 +167,33 @@ # VPC-supported IANA protocol numbers # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml -PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, 'ipv6-icmp': 58} +PROTOCOL_NUMBERS = {"all": -1, "icmp": 1, "tcp": 6, "udp": 17, "ipv6-icmp": 58} # Utility methods def icmp_present(entry): - if len(entry) == 6 and entry[1] in ['icmp', 'ipv6-icmp'] or entry[1] in [1, 58]: + if len(entry) == 6 and entry[1] in ["icmp", "ipv6-icmp"] or entry[1] in [1, 58]: return True def subnets_removed(nacl_id, subnets, client, module): results = find_acl_by_id(nacl_id, client, module) - associations = results['NetworkAcls'][0]['Associations'] - subnet_ids = [assoc['SubnetId'] for assoc in associations] + associations = results["NetworkAcls"][0]["Associations"] + subnet_ids = [assoc["SubnetId"] for assoc in associations] return [subnet for subnet in subnet_ids if subnet not in subnets] def subnets_added(nacl_id, subnets, client, module): results = find_acl_by_id(nacl_id, client, module) - associations = results['NetworkAcls'][0]['Associations'] - subnet_ids = [assoc['SubnetId'] for assoc in associations] + associations = results["NetworkAcls"][0]["Associations"] + subnet_ids = [assoc["SubnetId"] for assoc in associations] return [subnet for subnet in subnets if subnet not in subnet_ids] def subnets_changed(nacl, client, module): changed = False - vpc_id = module.params.get('vpc_id') - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + vpc_id = module.params.get("vpc_id") + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] subnets = subnets_to_associate(nacl, client, module) if not subnets: default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0] @@ -219,40 +219,41 @@ def subnets_changed(nacl, client, module): def nacls_changed(nacl, client, module): changed = False params = dict() - params['egress'] = module.params.get('egress') - params['ingress'] = module.params.get('ingress') + params["egress"] = module.params.get("egress") + params["ingress"] = module.params.get("ingress") - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] nacl = describe_network_acl(client, module) - entries = nacl['NetworkAcls'][0]['Entries'] - egress = [rule for rule in entries if rule['Egress'] is True and rule['RuleNumber'] < 32767] - ingress = [rule for rule in entries if rule['Egress'] is False and rule['RuleNumber'] < 32767] - if rules_changed(egress, params['egress'], True, nacl_id, client, module): + entries = nacl["NetworkAcls"][0]["Entries"] + egress = [rule for rule in entries if rule["Egress"] is True and rule["RuleNumber"] < 32767] + ingress = [rule for rule in entries if rule["Egress"] is False and rule["RuleNumber"] < 32767] + if rules_changed(egress, params["egress"], True, nacl_id, client, module): changed = True - if rules_changed(ingress, params['ingress'], False, nacl_id, client, module): + if rules_changed(ingress, params["ingress"], False, nacl_id, client, module): changed = True return changed def tags_changed(nacl_id, client, module): - tags = module.params.get('tags') - name = module.params.get('name') - purge_tags = module.params.get('purge_tags') + tags = module.params.get("tags") + name = module.params.get("name") + purge_tags = module.params.get("purge_tags") if name is None and tags is None: return False - if module.params.get('tags') is None: + if module.params.get("tags") is None: # Only purge tags if tags is explicitly set to {} and purge_tags is True purge_tags = False new_tags = dict() - if module.params.get('name') is not None: - new_tags['Name'] = module.params.get('name') - new_tags.update(module.params.get('tags') or {}) + if module.params.get("name") is not None: + new_tags["Name"] = module.params.get("name") + new_tags.update(module.params.get("tags") or {}) - return ensure_ec2_tags(client, module, nacl_id, tags=new_tags, - purge_tags=purge_tags, retry_codes=['InvalidNetworkAclID.NotFound']) + return ensure_ec2_tags( + client, module, nacl_id, tags=new_tags, purge_tags=purge_tags, retry_codes=["InvalidNetworkAclID.NotFound"] + ) def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): @@ -267,60 +268,60 @@ def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): if removed_rules: params = dict() for rule in removed_rules: - params['NetworkAclId'] = nacl_id - params['RuleNumber'] = rule['RuleNumber'] - params['Egress'] = Egress + params["NetworkAclId"] = nacl_id + params["RuleNumber"] = rule["RuleNumber"] + params["Egress"] = Egress delete_network_acl_entry(params, client, module) changed = True added_rules = [x for x in rules if x not in aws_rules] if added_rules: for rule in added_rules: - rule['NetworkAclId'] = nacl_id + rule["NetworkAclId"] = nacl_id create_network_acl_entry(rule, client, module) changed = True return changed def is_ipv6(cidr): - return ':' in cidr + return ":" in cidr def process_rule_entry(entry, Egress): params = dict() - params['RuleNumber'] = entry[0] - params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]]) - params['RuleAction'] = entry[2] - params['Egress'] = Egress + params["RuleNumber"] = entry[0] + params["Protocol"] = str(PROTOCOL_NUMBERS[entry[1]]) + params["RuleAction"] = entry[2] + params["Egress"] = Egress if is_ipv6(entry[3]): - params['Ipv6CidrBlock'] = entry[3] + params["Ipv6CidrBlock"] = entry[3] else: - params['CidrBlock'] = entry[3] + params["CidrBlock"] = entry[3] if icmp_present(entry): - params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])} + params["IcmpTypeCode"] = {"Type": int(entry[4]), "Code": int(entry[5])} else: if entry[6] or entry[7]: - params['PortRange'] = {"From": entry[6], 'To': entry[7]} + params["PortRange"] = {"From": entry[6], "To": entry[7]} return params def restore_default_associations(assoc_ids, default_nacl_id, client, module): if assoc_ids: params = dict() - params['NetworkAclId'] = default_nacl_id[0] + params["NetworkAclId"] = default_nacl_id[0] for assoc_id in assoc_ids: - params['AssociationId'] = assoc_id + params["AssociationId"] = assoc_id restore_default_acl_association(params, client, module) return True def construct_acl_entries(nacl, client, module): - for entry in module.params.get('ingress'): + for entry in module.params.get("ingress"): params = process_rule_entry(entry, Egress=False) - params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId'] + params["NetworkAclId"] = nacl["NetworkAcl"]["NetworkAclId"] create_network_acl_entry(params, client, module) - for rule in module.params.get('egress'): + for rule in module.params.get("egress"): params = process_rule_entry(rule, Egress=True) - params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId'] + params["NetworkAclId"] = nacl["NetworkAcl"]["NetworkAclId"] create_network_acl_entry(params, client, module) @@ -328,21 +329,21 @@ def construct_acl_entries(nacl, client, module): def setup_network_acl(client, module): changed = False nacl = describe_network_acl(client, module) - if not nacl['NetworkAcls']: + if not nacl["NetworkAcls"]: tags = {} - if module.params.get('name'): - tags['Name'] = module.params.get('name') - tags.update(module.params.get('tags') or {}) - nacl = create_network_acl(module.params.get('vpc_id'), client, module, tags) - nacl_id = nacl['NetworkAcl']['NetworkAclId'] + if module.params.get("name"): + tags["Name"] = module.params.get("name") + tags.update(module.params.get("tags") or {}) + nacl = create_network_acl(module.params.get("vpc_id"), client, module, tags) + nacl_id = nacl["NetworkAcl"]["NetworkAclId"] subnets = subnets_to_associate(nacl, client, module) replace_network_acl_association(nacl_id, subnets, client, module) construct_acl_entries(nacl, client, module) changed = True - return changed, nacl['NetworkAcl']['NetworkAclId'] + return changed, nacl["NetworkAcl"]["NetworkAclId"] else: changed = False - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] changed |= subnets_changed(nacl, client, module) changed |= nacls_changed(nacl, client, module) changed |= tags_changed(nacl_id, client, module) @@ -353,11 +354,11 @@ def remove_network_acl(client, module): changed = False result = dict() nacl = describe_network_acl(client, module) - if nacl['NetworkAcls']: - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] - vpc_id = nacl['NetworkAcls'][0]['VpcId'] - associations = nacl['NetworkAcls'][0]['Associations'] - assoc_ids = [a['NetworkAclAssociationId'] for a in associations] + if nacl["NetworkAcls"]: + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] + vpc_id = nacl["NetworkAcls"][0]["VpcId"] + associations = nacl["NetworkAcls"][0]["Associations"] + assoc_ids = [a["NetworkAclAssociationId"] for a in associations] default_nacl_id = find_default_vpc_nacl(vpc_id, client, module) if not default_nacl_id: result = {vpc_id: "Default NACL ID not found - Check the VPC ID"} @@ -384,7 +385,7 @@ def _create_network_acl(client, *args, **kwargs): def create_network_acl(vpc_id, client, module, tags): params = dict(VpcId=vpc_id) if tags: - params['TagSpecifications'] = boto3_tag_specifications(tags, ['network-acl']) + params["TagSpecifications"] = boto3_tag_specifications(tags, ["network-acl"]) try: if module.check_mode: nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000")) @@ -395,7 +396,7 @@ def create_network_acl(vpc_id, client, module, tags): return nacl -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _create_network_acl_entry(client, *args, **kwargs): return client.create_network_acl_entry(*args, **kwargs) @@ -421,7 +422,7 @@ def delete_network_acl(nacl_id, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _delete_network_acl_entry(client, *args, **kwargs): return client.delete_network_acl_entry(*args, **kwargs) @@ -439,7 +440,7 @@ def _describe_network_acls(client, **kwargs): return client.describe_network_acls(**kwargs) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _describe_network_acls_retry_missing(client, **kwargs): return client.describe_network_acls(**kwargs) @@ -448,25 +449,23 @@ def describe_acl_associations(subnets, client, module): if not subnets: return [] try: - results = _describe_network_acls_retry_missing(client, Filters=[ - {'Name': 'association.subnet-id', 'Values': subnets} - ]) + results = _describe_network_acls_retry_missing( + client, Filters=[{"Name": "association.subnet-id", "Values": subnets}] + ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - associations = results['NetworkAcls'][0]['Associations'] - return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets] + associations = results["NetworkAcls"][0]["Associations"] + return [a["NetworkAclAssociationId"] for a in associations if a["SubnetId"] in subnets] def describe_network_acl(client, module): try: - if module.params.get('nacl_id'): - nacl = _describe_network_acls(client, Filters=[ - {'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]} - ]) + if module.params.get("nacl_id"): + nacl = _describe_network_acls( + client, Filters=[{"Name": "network-acl-id", "Values": [module.params.get("nacl_id")]}] + ) else: - nacl = _describe_network_acls(client, Filters=[ - {'Name': 'tag:Name', 'Values': [module.params.get('name')]} - ]) + nacl = _describe_network_acls(client, Filters=[{"Name": "tag:Name", "Values": [module.params.get("name")]}]) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) return nacl @@ -481,38 +480,37 @@ def find_acl_by_id(nacl_id, client, module): def find_default_vpc_nacl(vpc_id, client, module): try: - response = _describe_network_acls_retry_missing(client, Filters=[ - {'Name': 'vpc-id', 'Values': [vpc_id]}]) + response = _describe_network_acls_retry_missing(client, Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - nacls = response['NetworkAcls'] - return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True] + nacls = response["NetworkAcls"] + return [n["NetworkAclId"] for n in nacls if n["IsDefault"] is True] def find_subnet_ids_by_nacl_id(nacl_id, client, module): try: - results = _describe_network_acls_retry_missing(client, Filters=[ - {'Name': 'association.network-acl-id', 'Values': [nacl_id]} - ]) + results = _describe_network_acls_retry_missing( + client, Filters=[{"Name": "association.network-acl-id", "Values": [nacl_id]}] + ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - if results['NetworkAcls']: - associations = results['NetworkAcls'][0]['Associations'] - return [s['SubnetId'] for s in associations if s['SubnetId']] + if results["NetworkAcls"]: + associations = results["NetworkAcls"][0]["Associations"] + return [s["SubnetId"] for s in associations if s["SubnetId"]] else: return [] -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _replace_network_acl_association(client, *args, **kwargs): return client.replace_network_acl_association(*args, **kwargs) def replace_network_acl_association(nacl_id, subnets, client, module): params = dict() - params['NetworkAclId'] = nacl_id + params["NetworkAclId"] = nacl_id for association in describe_acl_associations(subnets, client, module): - params['AssociationId'] = association + params["AssociationId"] = association try: if not module.check_mode: _replace_network_acl_association(client, **params) @@ -520,7 +518,7 @@ def replace_network_acl_association(nacl_id, subnets, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _replace_network_acl_entry(client, *args, **kwargs): return client.replace_network_acl_entry(*args, **kwargs) @@ -528,7 +526,7 @@ def _replace_network_acl_entry(client, *args, **kwargs): def replace_network_acl_entry(entries, Egress, nacl_id, client, module): for entry in entries: params = entry - params['NetworkAclId'] = nacl_id + params["NetworkAclId"] = nacl_id try: if not module.check_mode: _replace_network_acl_entry(client, **params) @@ -536,7 +534,7 @@ def replace_network_acl_entry(entries, Egress, nacl_id, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _replace_network_acl_association(client, *args, **kwargs): return client.replace_network_acl_association(*args, **kwargs) @@ -555,25 +553,23 @@ def _describe_subnets(client, *args, **kwargs): def subnets_to_associate(nacl, client, module): - params = list(module.params.get('subnets')) + params = list(module.params.get("subnets")) if not params: return [] all_found = [] if any(x.startswith("subnet-") for x in params): try: - subnets = _describe_subnets(client, Filters=[ - {'Name': 'subnet-id', 'Values': params}]) - all_found.extend(subnets.get('Subnets', [])) + subnets = _describe_subnets(client, Filters=[{"Name": "subnet-id", "Values": params}]) + all_found.extend(subnets.get("Subnets", [])) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) if len(params) != len(all_found): try: - subnets = _describe_subnets(client, Filters=[ - {'Name': 'tag:Name', 'Values': params}]) - all_found.extend(subnets.get('Subnets', [])) + subnets = _describe_subnets(client, Filters=[{"Name": "tag:Name", "Values": params}]) + all_found.extend(subnets.get("Subnets", [])) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - return list(set(s['SubnetId'] for s in all_found if s.get('SubnetId'))) + return list(set(s["SubnetId"] for s in all_found if s.get("SubnetId"))) def main(): @@ -581,29 +577,31 @@ def main(): vpc_id=dict(), name=dict(), nacl_id=dict(), - subnets=dict(required=False, type='list', default=list(), elements='str'), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(required=False, type='bool', default=True), - ingress=dict(required=False, type='list', default=list(), elements='list'), - egress=dict(required=False, type='list', default=list(), elements='list'), - state=dict(default='present', choices=['present', 'absent']), + subnets=dict(required=False, type="list", default=list(), elements="str"), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(required=False, type="bool", default=True), + ingress=dict(required=False, type="list", default=list(), elements="list"), + egress=dict(required=False, type="list", default=list(), elements="list"), + state=dict(default="present", choices=["present", "absent"]), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[["name", "nacl_id"]], + required_if=[["state", "present", ["vpc_id"]]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[['name', 'nacl_id']], - required_if=[['state', 'present', ['vpc_id']]]) - state = module.params.get('state').lower() + state = module.params.get("state").lower() - client = module.client('ec2') + client = module.client("ec2") invocations = { "present": setup_network_acl, - "absent": remove_network_acl + "absent": remove_network_acl, } (changed, results) = invocations[state](client, module) module.exit_json(changed=changed, nacl_id=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_vpc_nacl_info.py b/plugins/modules/ec2_vpc_nacl_info.py index 3d37cf26524..ecf530a9d74 100644 --- a/plugins/modules/ec2_vpc_nacl_info.py +++ b/plugins/modules/ec2_vpc_nacl_info.py @@ -119,11 +119,10 @@ # VPC-supported IANA protocol numbers # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml -PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'} +PROTOCOL_NAMES = {"-1": "all", "1": "icmp", "6": "tcp", "17": "udp"} def list_ec2_vpc_nacls(connection, module): - nacl_ids = module.params.get("nacl_ids") filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) @@ -132,86 +131,97 @@ def list_ec2_vpc_nacls(connection, module): try: nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters) - except is_boto3_error_code('InvalidNetworkAclID.NotFound'): - module.fail_json(msg='Unable to describe ACL. NetworkAcl does not exist') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidNetworkAclID.NotFound"): + module.fail_json(msg="Unable to describe ACL. NetworkAcl does not exist") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids)) # Turn the boto3 result in to ansible_friendly_snaked_names snaked_nacls = [] - for nacl in nacls['NetworkAcls']: + for nacl in nacls["NetworkAcls"]: snaked_nacls.append(camel_dict_to_snake_dict(nacl)) # Turn the boto3 result in to ansible friendly tag dictionary for nacl in snaked_nacls: - if 'tags' in nacl: - nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'], 'key', 'value') - if 'entries' in nacl: - nacl['egress'] = [nacl_entry_to_list(entry) for entry in nacl['entries'] - if entry['rule_number'] < 32767 and entry['egress']] - nacl['ingress'] = [nacl_entry_to_list(entry) for entry in nacl['entries'] - if entry['rule_number'] < 32767 and not entry['egress']] - del nacl['entries'] - if 'associations' in nacl: - nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']] - del nacl['associations'] - if 'network_acl_id' in nacl: - nacl['nacl_id'] = nacl['network_acl_id'] - del nacl['network_acl_id'] + if "tags" in nacl: + nacl["tags"] = boto3_tag_list_to_ansible_dict(nacl["tags"], "key", "value") + if "entries" in nacl: + nacl["egress"] = [ + nacl_entry_to_list(entry) + for entry in nacl["entries"] + if entry["rule_number"] < 32767 and entry["egress"] + ] + nacl["ingress"] = [ + nacl_entry_to_list(entry) + for entry in nacl["entries"] + if entry["rule_number"] < 32767 and not entry["egress"] + ] + del nacl["entries"] + if "associations" in nacl: + nacl["subnets"] = [a["subnet_id"] for a in nacl["associations"]] + del nacl["associations"] + if "network_acl_id" in nacl: + nacl["nacl_id"] = nacl["network_acl_id"] + del nacl["network_acl_id"] module.exit_json(nacls=snaked_nacls) def nacl_entry_to_list(entry): - # entry list format # [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to] elist = [] - elist.append(entry['rule_number']) + elist.append(entry["rule_number"]) - if entry.get('protocol') in PROTOCOL_NAMES: - elist.append(PROTOCOL_NAMES[entry['protocol']]) + if entry.get("protocol") in PROTOCOL_NAMES: + elist.append(PROTOCOL_NAMES[entry["protocol"]]) else: - elist.append(entry.get('protocol')) + elist.append(entry.get("protocol")) - elist.append(entry['rule_action']) + elist.append(entry["rule_action"]) - if entry.get('cidr_block'): - elist.append(entry['cidr_block']) - elif entry.get('ipv6_cidr_block'): - elist.append(entry['ipv6_cidr_block']) + if entry.get("cidr_block"): + elist.append(entry["cidr_block"]) + elif entry.get("ipv6_cidr_block"): + elist.append(entry["ipv6_cidr_block"]) else: elist.append(None) elist = elist + [None, None, None, None] - if entry['protocol'] in ('1', '58'): - elist[4] = entry.get('icmp_type_code', {}).get('type') - elist[5] = entry.get('icmp_type_code', {}).get('code') + if entry["protocol"] in ("1", "58"): + elist[4] = entry.get("icmp_type_code", {}).get("type") + elist[5] = entry.get("icmp_type_code", {}).get("code") - if entry['protocol'] not in ('1', '6', '17', '58'): + if entry["protocol"] not in ("1", "6", "17", "58"): elist[6] = 0 elist[7] = 65535 - elif 'port_range' in entry: - elist[6] = entry['port_range']['from'] - elist[7] = entry['port_range']['to'] + elif "port_range" in entry: + elist[6] = entry["port_range"]["from"] + elist[7] = entry["port_range"]["to"] return elist def main(): - argument_spec = dict( - nacl_ids=dict(default=[], type='list', aliases=['nacl_id'], elements='str'), - filters=dict(default={}, type='dict')) + nacl_ids=dict(default=[], type="list", aliases=["nacl_id"], elements="str"), + filters=dict(default={}, type="dict"), + ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) list_ec2_vpc_nacls(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_vpc_peer.py b/plugins/modules/ec2_vpc_peer.py index a5af559cc9d..465c9c852eb 100644 --- a/plugins/modules/ec2_vpc_peer.py +++ b/plugins/modules/ec2_vpc_peer.py @@ -370,15 +370,13 @@ def wait_for_state(client, module, state, pcx_id): - waiter = client.get_waiter('vpc_peering_connection_exists') + waiter = client.get_waiter("vpc_peering_connection_exists") peer_filter = { - 'vpc-peering-connection-id': pcx_id, - 'status-code': state, + "vpc-peering-connection-id": pcx_id, + "status-code": state, } try: - waiter.wait( - Filters=ansible_dict_to_boto3_filter_list(peer_filter) - ) + waiter.wait(Filters=ansible_dict_to_boto3_filter_list(peer_filter)) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, "Failed to wait for state change") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -387,18 +385,18 @@ def wait_for_state(client, module, state, pcx_id): def describe_peering_connections(params, client): peer_filter = { - 'requester-vpc-info.vpc-id': params['VpcId'], - 'accepter-vpc-info.vpc-id': params['PeerVpcId'], + "requester-vpc-info.vpc-id": params["VpcId"], + "accepter-vpc-info.vpc-id": params["PeerVpcId"], } result = client.describe_vpc_peering_connections( aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(peer_filter), ) - if result['VpcPeeringConnections'] == []: + if result["VpcPeeringConnections"] == []: # Try again with the VPC/Peer relationship reversed peer_filter = { - 'requester-vpc-info.vpc-id': params['PeerVpcId'], - 'accepter-vpc-info.vpc-id': params['VpcId'], + "requester-vpc-info.vpc-id": params["PeerVpcId"], + "accepter-vpc-info.vpc-id": params["VpcId"], } result = client.describe_vpc_peering_connections( aws_retry=True, @@ -409,29 +407,32 @@ def describe_peering_connections(params, client): def is_active(peering_conn): - return peering_conn['Status']['Code'] == 'active' + return peering_conn["Status"]["Code"] == "active" def is_pending(peering_conn): - return peering_conn['Status']['Code'] == 'pending-acceptance' + return peering_conn["Status"]["Code"] == "pending-acceptance" def create_peer_connection(client, module): changed = False params = dict() - params['VpcId'] = module.params.get('vpc_id') - params['PeerVpcId'] = module.params.get('peer_vpc_id') - if module.params.get('peer_region'): - params['PeerRegion'] = module.params.get('peer_region') - if module.params.get('peer_owner_id'): - params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) + params["VpcId"] = module.params.get("vpc_id") + params["PeerVpcId"] = module.params.get("peer_vpc_id") + if module.params.get("peer_region"): + params["PeerRegion"] = module.params.get("peer_region") + if module.params.get("peer_owner_id"): + params["PeerOwnerId"] = str(module.params.get("peer_owner_id")) peering_conns = describe_peering_connections(params, client) - for peering_conn in peering_conns['VpcPeeringConnections']: - pcx_id = peering_conn['VpcPeeringConnectionId'] - if ensure_ec2_tags(client, module, pcx_id, - purge_tags=module.params.get('purge_tags'), - tags=module.params.get('tags'), - ): + for peering_conn in peering_conns["VpcPeeringConnections"]: + pcx_id = peering_conn["VpcPeeringConnectionId"] + if ensure_ec2_tags( + client, + module, + pcx_id, + purge_tags=module.params.get("purge_tags"), + tags=module.params.get("tags"), + ): changed = True if is_active(peering_conn): return (changed, peering_conn) @@ -439,54 +440,59 @@ def create_peer_connection(client, module): return (changed, peering_conn) try: peering_conn = client.create_vpc_peering_connection(aws_retry=True, **params) - pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'] - if module.params.get('tags'): + pcx_id = peering_conn["VpcPeeringConnection"]["VpcPeeringConnectionId"] + if module.params.get("tags"): # Once the minimum botocore version is bumped to > 1.17.24 # (hopefully community.aws 3.0.0) we can add the tags to the # creation parameters - add_ec2_tags(client, module, pcx_id, module.params.get('tags'), - retry_codes=['InvalidVpcPeeringConnectionID.NotFound']) - if module.params.get('wait'): - wait_for_state(client, module, 'pending-acceptance', pcx_id) + add_ec2_tags( + client, + module, + pcx_id, + module.params.get("tags"), + retry_codes=["InvalidVpcPeeringConnectionID.NotFound"], + ) + if module.params.get("wait"): + wait_for_state(client, module, "pending-acceptance", pcx_id) changed = True - return (changed, peering_conn['VpcPeeringConnection']) + return (changed, peering_conn["VpcPeeringConnection"]) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) def remove_peer_connection(client, module): - pcx_id = module.params.get('peering_id') + pcx_id = module.params.get("peering_id") if pcx_id: peering_conn = get_peering_connection_by_id(pcx_id, client, module) else: params = dict() - params['VpcId'] = module.params.get('vpc_id') - params['PeerVpcId'] = module.params.get('peer_vpc_id') - params['PeerRegion'] = module.params.get('peer_region') - if module.params.get('peer_owner_id'): - params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) - peering_conn = describe_peering_connections(params, client)['VpcPeeringConnections'][0] + params["VpcId"] = module.params.get("vpc_id") + params["PeerVpcId"] = module.params.get("peer_vpc_id") + params["PeerRegion"] = module.params.get("peer_region") + if module.params.get("peer_owner_id"): + params["PeerOwnerId"] = str(module.params.get("peer_owner_id")) + peering_conn = describe_peering_connections(params, client)["VpcPeeringConnections"][0] if not peering_conn: module.exit_json(changed=False) else: - pcx_id = pcx_id or peering_conn['VpcPeeringConnectionId'] + pcx_id = pcx_id or peering_conn["VpcPeeringConnectionId"] - if peering_conn['Status']['Code'] == 'deleted': - module.exit_json(msg='Connection in deleted state.', changed=False, peering_id=pcx_id) - if peering_conn['Status']['Code'] == 'rejected': + if peering_conn["Status"]["Code"] == "deleted": + module.exit_json(msg="Connection in deleted state.", changed=False, peering_id=pcx_id) + if peering_conn["Status"]["Code"] == "rejected": module.exit_json( - msg='Connection has been rejected. State cannot be changed and will be removed automatically by AWS', + msg="Connection has been rejected. State cannot be changed and will be removed automatically by AWS", changed=False, - peering_id=pcx_id + peering_id=pcx_id, ) try: params = dict() - params['VpcPeeringConnectionId'] = pcx_id + params["VpcPeeringConnectionId"] = pcx_id client.delete_vpc_peering_connection(aws_retry=True, **params) - if module.params.get('wait'): - wait_for_state(client, module, 'deleted', pcx_id) + if module.params.get("wait"): + wait_for_state(client, module, "deleted", pcx_id) module.exit_json(changed=True, peering_id=pcx_id) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) @@ -494,44 +500,55 @@ def remove_peer_connection(client, module): def get_peering_connection_by_id(peering_id, client, module): params = dict() - params['VpcPeeringConnectionIds'] = [peering_id] + params["VpcPeeringConnectionIds"] = [peering_id] try: vpc_peering_connection = client.describe_vpc_peering_connections(aws_retry=True, **params) - return vpc_peering_connection['VpcPeeringConnections'][0] - except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e: - module.fail_json_aws(e, msg='Malformed connection ID') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Error while describing peering connection by peering_id') + return vpc_peering_connection["VpcPeeringConnections"][0] + except is_boto3_error_code("InvalidVpcPeeringConnectionId.Malformed") as e: + module.fail_json_aws(e, msg="Malformed connection ID") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Error while describing peering connection by peering_id") def accept_reject(state, client, module): changed = False params = dict() - peering_id = module.params.get('peering_id') - params['VpcPeeringConnectionId'] = peering_id + peering_id = module.params.get("peering_id") + params["VpcPeeringConnectionId"] = peering_id vpc_peering_connection = get_peering_connection_by_id(peering_id, client, module) - peering_status = vpc_peering_connection['Status']['Code'] + peering_status = vpc_peering_connection["Status"]["Code"] - if peering_status not in ['active', 'rejected']: + if peering_status not in ["active", "rejected"]: try: - if state == 'accept': + if state == "accept": client.accept_vpc_peering_connection(aws_retry=True, **params) - target_state = 'active' + target_state = "active" else: client.reject_vpc_peering_connection(aws_retry=True, **params) - target_state = 'rejected' - if module.params.get('tags'): - add_ec2_tags(client, module, peering_id, module.params.get('tags'), - retry_codes=['InvalidVpcPeeringConnectionID.NotFound']) + target_state = "rejected" + if module.params.get("tags"): + add_ec2_tags( + client, + module, + peering_id, + module.params.get("tags"), + retry_codes=["InvalidVpcPeeringConnectionID.NotFound"], + ) changed = True - if module.params.get('wait'): + if module.params.get("wait"): wait_for_state(client, module, target_state, peering_id) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) - if ensure_ec2_tags(client, module, peering_id, - purge_tags=module.params.get('purge_tags'), - tags=module.params.get('tags'), - ): + if ensure_ec2_tags( + client, + module, + peering_id, + purge_tags=module.params.get("purge_tags"), + tags=module.params.get("tags"), + ): changed = True # Relaod peering conection infos to return latest state/params @@ -546,34 +563,36 @@ def main(): peer_region=dict(), peering_id=dict(), peer_owner_id=dict(), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']), - wait=dict(default=False, type='bool'), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + state=dict(default="present", choices=["present", "absent", "accept", "reject"]), + wait=dict(default=False, type="bool"), ) required_if = [ - ('state', 'present', ['vpc_id', 'peer_vpc_id']), - ('state', 'accept', ['peering_id']), - ('state', 'reject', ['peering_id']) + ("state", "present", ["vpc_id", "peer_vpc_id"]), + ("state", "accept", ["peering_id"]), + ("state", "reject", ["peering_id"]), ] module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if) - state = module.params.get('state') - peering_id = module.params.get('peering_id') - vpc_id = module.params.get('vpc_id') - peer_vpc_id = module.params.get('peer_vpc_id') + state = module.params.get("state") + peering_id = module.params.get("peering_id") + vpc_id = module.params.get("vpc_id") + peer_vpc_id = module.params.get("peer_vpc_id") try: - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': + if state == "present": (changed, results) = create_peer_connection(client, module) - elif state == 'absent': + elif state == "absent": if not peering_id and (not vpc_id or not peer_vpc_id): - module.fail_json(msg='state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]') + module.fail_json( + msg="state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]" + ) remove_peer_connection(client, module) else: @@ -581,10 +600,12 @@ def main(): formatted_results = camel_dict_to_snake_dict(results) # Turn the resource tags from boto3 into an ansible friendly tag dictionary - formatted_results['tags'] = boto3_tag_list_to_ansible_dict(formatted_results.get('tags', [])) + formatted_results["tags"] = boto3_tag_list_to_ansible_dict(formatted_results.get("tags", [])) - module.exit_json(changed=changed, vpc_peering_connection=formatted_results, peering_id=results['VpcPeeringConnectionId']) + module.exit_json( + changed=changed, vpc_peering_connection=formatted_results, peering_id=results["VpcPeeringConnectionId"] + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_vpc_peering_info.py b/plugins/modules/ec2_vpc_peering_info.py index 8faf64b8906..ee9fda32118 100644 --- a/plugins/modules/ec2_vpc_peering_info.py +++ b/plugins/modules/ec2_vpc_peering_info.py @@ -216,41 +216,43 @@ def get_vpc_peers(client, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - if module.params.get('peer_connection_ids'): - params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + if module.params.get("peer_connection_ids"): + params["VpcPeeringConnectionIds"] = module.params.get("peer_connection_ids") try: result = client.describe_vpc_peering_connections(aws_retry=True, **params) result = normalize_boto3_result(result) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe peering connections") - return result['VpcPeeringConnections'] + return result["VpcPeeringConnections"] def main(): argument_spec = dict( - filters=dict(default=dict(), type='dict'), - peer_connection_ids=dict(default=None, type='list', elements='str'), + filters=dict(default=dict(), type="dict"), + peer_connection_ids=dict(default=None, type="list", elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True,) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) try: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # Turn the boto3 result in to ansible friendly_snaked_names results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, module)] # Turn the boto3 result in to ansible friendly tag dictionary for peer in results: - peer['tags'] = boto3_tag_list_to_ansible_dict(peer.get('tags', [])) + peer["tags"] = boto3_tag_list_to_ansible_dict(peer.get("tags", [])) module.exit_json(result=results, vpc_peering_connections=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_vpc_vgw.py b/plugins/modules/ec2_vpc_vgw.py index 74aab4a077c..e59fe25839c 100644 --- a/plugins/modules/ec2_vpc_vgw.py +++ b/plugins/modules/ec2_vpc_vgw.py @@ -155,11 +155,14 @@ class VGWRetry(AWSRetry): @staticmethod def status_code_from_exception(error): - return (error.response['Error']['Code'], error.response['Error']['Message'],) + return ( + error.response["Error"]["Code"], + error.response["Error"]["Message"], + ) @staticmethod def found(response_code, catch_extra_error_codes=None): - retry_on = ['The maximum number of mutating objects has been reached.'] + retry_on = ["The maximum number of mutating objects has been reached."] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) @@ -179,37 +182,37 @@ def get_vgw_info(vgws): for vgw in vgws: vgw_info = { - 'id': vgw['VpnGatewayId'], - 'type': vgw['Type'], - 'state': vgw['State'], - 'vpc_id': None, - 'tags': dict() + "id": vgw["VpnGatewayId"], + "type": vgw["Type"], + "state": vgw["State"], + "vpc_id": None, + "tags": dict(), } - if vgw['Tags']: - vgw_info['tags'] = boto3_tag_list_to_ansible_dict(vgw['Tags']) + if vgw["Tags"]: + vgw_info["tags"] = boto3_tag_list_to_ansible_dict(vgw["Tags"]) - if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached': - vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId'] + if len(vgw["VpcAttachments"]) != 0 and vgw["VpcAttachments"][0]["State"] == "attached": + vgw_info["vpc_id"] = vgw["VpcAttachments"][0]["VpcId"] return vgw_info def wait_for_status(client, module, vpn_gateway_id, status): polling_increment_secs = 15 - max_retries = (module.params.get('wait_timeout') // polling_increment_secs) + max_retries = module.params.get("wait_timeout") // polling_increment_secs status_achieved = False for x in range(0, max_retries): try: response = find_vgw(client, module, vpn_gateway_id) - if response[0]['VpcAttachments'][0]['State'] == status: + if response[0]["VpcAttachments"][0]["State"] == status: status_achieved = True break else: time.sleep(polling_increment_secs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failure while waiting for status update') + module.fail_json_aws(e, msg="Failure while waiting for status update") result = response return status_achieved, result @@ -217,22 +220,21 @@ def wait_for_status(client, module, vpn_gateway_id, status): def attach_vgw(client, module, vpn_gateway_id): params = dict() - params['VpcId'] = module.params.get('vpc_id') + params["VpcId"] = module.params.get("vpc_id") try: # Immediately after a detachment, the EC2 API sometimes will report the VpnGateways[0].State # as available several seconds before actually permitting a new attachment. # So we catch and retry that error. See https://github.com/ansible/ansible/issues/53185 - response = VGWRetry.jittered_backoff(retries=5, - catch_extra_error_codes=['InvalidParameterValue'] - )(client.attach_vpn_gateway)(VpnGatewayId=vpn_gateway_id, - VpcId=params['VpcId']) + response = VGWRetry.jittered_backoff(retries=5, catch_extra_error_codes=["InvalidParameterValue"])( + client.attach_vpn_gateway + )(VpnGatewayId=vpn_gateway_id, VpcId=params["VpcId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to attach VPC') + module.fail_json_aws(e, msg="Failed to attach VPC") - status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached') + status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], "attached") if not status_achieved: - module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console') + module.fail_json(msg="Error waiting for vpc to attach to vgw - please check the AWS console") result = response return result @@ -240,19 +242,19 @@ def attach_vgw(client, module, vpn_gateway_id): def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): params = dict() - params['VpcId'] = module.params.get('vpc_id') + params["VpcId"] = module.params.get("vpc_id") try: if vpc_id: response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id, aws_retry=True) else: - response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'], aws_retry=True) + response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params["VpcId"], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, 'Failed to detach gateway') + module.fail_json_aws(e, "Failed to detach gateway") - status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached') + status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], "detached") if not status_achieved: - module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console') + module.fail_json(msg="Error waiting for vpc to detach from vgw - please check the AWS console") result = response return result @@ -260,37 +262,37 @@ def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): def create_vgw(client, module): params = dict() - params['Type'] = module.params.get('type') - tags = module.params.get('tags') or {} - tags['Name'] = module.params.get('name') - params['TagSpecifications'] = boto3_tag_specifications(tags, ['vpn-gateway']) - if module.params.get('asn'): - params['AmazonSideAsn'] = module.params.get('asn') + params["Type"] = module.params.get("type") + tags = module.params.get("tags") or {} + tags["Name"] = module.params.get("name") + params["TagSpecifications"] = boto3_tag_specifications(tags, ["vpn-gateway"]) + if module.params.get("asn"): + params["AmazonSideAsn"] = module.params.get("asn") try: response = client.create_vpn_gateway(aws_retry=True, **params) - get_waiter( - client, 'vpn_gateway_exists' - ).wait( - VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']] - ) + get_waiter(client, "vpn_gateway_exists").wait(VpnGatewayIds=[response["VpnGateway"]["VpnGatewayId"]]) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId'])) - except is_boto3_error_code('VpnGatewayLimitExceeded') as e: + module.fail_json_aws( + e, msg="Failed to wait for Vpn Gateway {0} to be available".format(response["VpnGateway"]["VpnGatewayId"]) + ) + except is_boto3_error_code("VpnGatewayLimitExceeded") as e: module.fail_json_aws(e, msg="Too many VPN gateways exist in this account.") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to create gateway') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to create gateway") result = response return result def delete_vgw(client, module, vpn_gateway_id): - try: response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to delete gateway') + module.fail_json_aws(e, msg="Failed to delete gateway") # return the deleted VpnGatewayId as this is not included in the above response result = vpn_gateway_id @@ -299,13 +301,13 @@ def delete_vgw(client, module, vpn_gateway_id): def find_vpc(client, module): params = dict() - params['vpc_id'] = module.params.get('vpc_id') + params["vpc_id"] = module.params.get("vpc_id") - if params['vpc_id']: + if params["vpc_id"]: try: - response = client.describe_vpcs(VpcIds=[params['vpc_id']], aws_retry=True) + response = client.describe_vpcs(VpcIds=[params["vpc_id"]], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe VPC') + module.fail_json_aws(e, msg="Failed to describe VPC") result = response return result @@ -314,66 +316,68 @@ def find_vpc(client, module): def find_vgw(client, module, vpn_gateway_id=None): params = dict() if vpn_gateway_id: - params['VpnGatewayIds'] = vpn_gateway_id + params["VpnGatewayIds"] = vpn_gateway_id else: - params['Filters'] = [ - {'Name': 'type', 'Values': [module.params.get('type')]}, - {'Name': 'tag:Name', 'Values': [module.params.get('name')]}, + params["Filters"] = [ + {"Name": "type", "Values": [module.params.get("type")]}, + {"Name": "tag:Name", "Values": [module.params.get("name")]}, ] - if module.params.get('state') == 'present': - params['Filters'].append({'Name': 'state', 'Values': ['pending', 'available']}) + if module.params.get("state") == "present": + params["Filters"].append({"Name": "state", "Values": ["pending", "available"]}) try: response = client.describe_vpn_gateways(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe gateway using filters') + module.fail_json_aws(e, msg="Failed to describe gateway using filters") - return sorted(response['VpnGateways'], key=lambda k: k['VpnGatewayId']) + return sorted(response["VpnGateways"], key=lambda k: k["VpnGatewayId"]) def ensure_vgw_present(client, module): - # If an existing vgw name and type matches our args, then a match is considered to have been # found and we will not create another vgw. changed = False params = dict() result = dict() - params['Name'] = module.params.get('name') - params['VpcId'] = module.params.get('vpc_id') - params['Type'] = module.params.get('type') - params['Tags'] = module.params.get('tags') - params['VpnGatewayIds'] = module.params.get('vpn_gateway_id') + params["Name"] = module.params.get("name") + params["VpcId"] = module.params.get("vpc_id") + params["Type"] = module.params.get("type") + params["Tags"] = module.params.get("tags") + params["VpnGatewayIds"] = module.params.get("vpn_gateway_id") # check that the vpc_id exists. If not, an exception is thrown - if params['VpcId']: + if params["VpcId"]: vpc = find_vpc(client, module) # check if a gateway matching our module args already exists existing_vgw = find_vgw(client, module) if existing_vgw != []: - vpn_gateway_id = existing_vgw[0]['VpnGatewayId'] - desired_tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') + vpn_gateway_id = existing_vgw[0]["VpnGatewayId"] + desired_tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") if desired_tags is None: desired_tags = dict() purge_tags = False - tags = dict(Name=module.params.get('name')) + tags = dict(Name=module.params.get("name")) tags.update(desired_tags) - changed = ensure_ec2_tags(client, module, vpn_gateway_id, resource_type='vpn-gateway', - tags=tags, purge_tags=purge_tags) + changed = ensure_ec2_tags( + client, module, vpn_gateway_id, resource_type="vpn-gateway", tags=tags, purge_tags=purge_tags + ) # if a vpc_id was provided, check if it exists and if it's attached - if params['VpcId']: - - current_vpc_attachments = existing_vgw[0]['VpcAttachments'] - - if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached': - if current_vpc_attachments[0]['VpcId'] != params['VpcId'] or current_vpc_attachments[0]['State'] != 'attached': + if params["VpcId"]: + current_vpc_attachments = existing_vgw[0]["VpcAttachments"] + + if current_vpc_attachments != [] and current_vpc_attachments[0]["State"] == "attached": + if ( + current_vpc_attachments[0]["VpcId"] != params["VpcId"] + or current_vpc_attachments[0]["State"] != "attached" + ): # detach the existing vpc from the virtual gateway - vpc_to_detach = current_vpc_attachments[0]['VpcId'] + vpc_to_detach = current_vpc_attachments[0]["VpcId"] detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) - get_waiter(client, 'vpn_gateway_detached').wait(VpnGatewayIds=[vpn_gateway_id]) + get_waiter(client, "vpn_gateway_detached").wait(VpnGatewayIds=[vpn_gateway_id]) attached_vgw = attach_vgw(client, module, vpn_gateway_id) changed = True else: @@ -385,10 +389,10 @@ def ensure_vgw_present(client, module): else: existing_vgw = find_vgw(client, module, [vpn_gateway_id]) - if existing_vgw[0]['VpcAttachments'] != []: - if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': + if existing_vgw[0]["VpcAttachments"] != []: + if existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": # detach the vpc from the vgw - vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) changed = True @@ -396,10 +400,10 @@ def ensure_vgw_present(client, module): # create a new vgw new_vgw = create_vgw(client, module) changed = True - vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId'] + vpn_gateway_id = new_vgw["VpnGateway"]["VpnGatewayId"] # if a vpc-id was supplied, attempt to attach it to the vgw - if params['VpcId']: + if params["VpcId"]: attached_vgw = attach_vgw(client, module, vpn_gateway_id) changed = True @@ -410,45 +414,46 @@ def ensure_vgw_present(client, module): def ensure_vgw_absent(client, module): - # If an existing vgw name and type matches our args, then a match is considered to have been # found and we will take steps to delete it. changed = False params = dict() result = dict() - params['Name'] = module.params.get('name') - params['VpcId'] = module.params.get('vpc_id') - params['Type'] = module.params.get('type') - params['Tags'] = module.params.get('tags') - params['VpnGatewayIds'] = module.params.get('vpn_gateway_id') + params["Name"] = module.params.get("name") + params["VpcId"] = module.params.get("vpc_id") + params["Type"] = module.params.get("type") + params["Tags"] = module.params.get("tags") + params["VpnGatewayIds"] = module.params.get("vpn_gateway_id") # check if a gateway matching our module args already exists - if params['VpnGatewayIds']: - existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']]) - if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted': + if params["VpnGatewayIds"]: + existing_vgw_with_id = find_vgw(client, module, [params["VpnGatewayIds"]]) + if existing_vgw_with_id != [] and existing_vgw_with_id[0]["State"] != "deleted": existing_vgw = existing_vgw_with_id - if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': - if params['VpcId']: - if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']: - module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console') + if existing_vgw[0]["VpcAttachments"] != [] and existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": + if params["VpcId"]: + if params["VpcId"] != existing_vgw[0]["VpcAttachments"][0]["VpcId"]: + module.fail_json( + msg="The vpc-id provided does not match the vpc-id currently attached - please check the AWS console" + ) else: # detach the vpc from the vgw - detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId']) - deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + detach_vgw(client, module, params["VpnGatewayIds"], params["VpcId"]) + deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) changed = True else: # attempt to detach any attached vpcs - vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] - detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach) - deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] + detach_vgw(client, module, params["VpnGatewayIds"], vpc_to_detach) + deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) changed = True else: # no vpc's are attached so attempt to delete the vgw - deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) changed = True else: @@ -457,20 +462,22 @@ def ensure_vgw_absent(client, module): else: # Check that a name and type argument has been supplied if no vgw-id - if not module.params.get('name') or not module.params.get('type'): - module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is supplied') + if not module.params.get("name") or not module.params.get("type"): + module.fail_json(msg="A name and type is required when no vgw-id and a status of 'absent' is supplied") existing_vgw = find_vgw(client, module) - if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted': - vpn_gateway_id = existing_vgw[0]['VpnGatewayId'] - if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': - if params['VpcId']: - if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']: - module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console') + if existing_vgw != [] and existing_vgw[0]["State"] != "deleted": + vpn_gateway_id = existing_vgw[0]["VpnGatewayId"] + if existing_vgw[0]["VpcAttachments"] != [] and existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": + if params["VpcId"]: + if params["VpcId"] != existing_vgw[0]["VpcAttachments"][0]["VpcId"]: + module.fail_json( + msg="The vpc-id provided does not match the vpc-id currently attached - please check the AWS console" + ) else: # detach the vpc from the vgw - detach_vgw(client, module, vpn_gateway_id, params['VpcId']) + detach_vgw(client, module, vpn_gateway_id, params["VpcId"]) # now that the vpc has been detached, delete the vgw deleted_vgw = delete_vgw(client, module, vpn_gateway_id) @@ -478,7 +485,7 @@ def ensure_vgw_absent(client, module): else: # attempt to detach any attached vpcs - vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) changed = True @@ -500,29 +507,28 @@ def ensure_vgw_absent(client, module): def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), name=dict(), vpn_gateway_id=dict(), vpc_id=dict(), - asn=dict(type='int'), - wait_timeout=dict(type='int', default=320), - type=dict(default='ipsec.1', choices=['ipsec.1']), - tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + asn=dict(type="int"), + wait_timeout=dict(type="int", default=320), + type=dict(default="ipsec.1", choices=["ipsec.1"]), + tags=dict(default=None, required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['name']]]) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[["state", "present", ["name"]]]) - state = module.params.get('state').lower() + state = module.params.get("state").lower() - client = module.client('ec2', retry_decorator=VGWRetry.jittered_backoff(retries=10)) + client = module.client("ec2", retry_decorator=VGWRetry.jittered_backoff(retries=10)) - if state == 'present': + if state == "present": (changed, results) = ensure_vgw_present(client, module) else: (changed, results) = ensure_vgw_absent(client, module) module.exit_json(changed=changed, vgw=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_vpc_vgw_info.py b/plugins/modules/ec2_vpc_vgw_info.py index c729b2fa5b8..d8bfcc78ecb 100644 --- a/plugins/modules/ec2_vpc_vgw_info.py +++ b/plugins/modules/ec2_vpc_vgw_info.py @@ -135,13 +135,13 @@ def get_virtual_gateway_info(virtual_gateway): - tags = virtual_gateway.get('Tags', []) + tags = virtual_gateway.get("Tags", []) resource_tags = boto3_tag_list_to_ansible_dict(tags) virtual_gateway_info = dict( - VpnGatewayId=virtual_gateway['VpnGatewayId'], - State=virtual_gateway['State'], - Type=virtual_gateway['Type'], - VpcAttachments=virtual_gateway['VpcAttachments'], + VpnGatewayId=virtual_gateway["VpnGatewayId"], + State=virtual_gateway["State"], + Type=virtual_gateway["Type"], + VpcAttachments=virtual_gateway["VpcAttachments"], Tags=tags, ResourceTags=resource_tags, ) @@ -151,32 +151,34 @@ def get_virtual_gateway_info(virtual_gateway): def list_virtual_gateways(client, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) if module.params.get("vpn_gateway_ids"): - params['VpnGatewayIds'] = module.params.get("vpn_gateway_ids") + params["VpnGatewayIds"] = module.params.get("vpn_gateway_ids") try: all_virtual_gateways = client.describe_vpn_gateways(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to list gateways") - return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=['ResourceTags']) - for vgw in all_virtual_gateways['VpnGateways']] + return [ + camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=["ResourceTags"]) + for vgw in all_virtual_gateways["VpnGateways"] + ] def main(): argument_spec = dict( - filters=dict(type='dict', default=dict()), - vpn_gateway_ids=dict(type='list', default=None, elements='str'), + filters=dict(type="dict", default=dict()), + vpn_gateway_ids=dict(type="list", default=None, elements="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) try: - connection = module.client('ec2') + connection = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # call your function here results = list_virtual_gateways(connection, module) @@ -184,5 +186,5 @@ def main(): module.exit_json(virtual_gateways=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_vpc_vpn.py b/plugins/modules/ec2_vpc_vpn.py index b7e997fade0..8d8dc1467e1 100644 --- a/plugins/modules/ec2_vpc_vpn.py +++ b/plugins/modules/ec2_vpc_vpn.py @@ -321,11 +321,14 @@ def __init__(self, msg, exception=None): class VPNRetry(AWSRetry): @staticmethod def status_code_from_exception(error): - return (error.response['Error']['Code'], error.response['Error']['Message'],) + return ( + error.response["Error"]["Code"], + error.response["Error"]["Message"], + ) @staticmethod def found(response_code, catch_extra_error_codes=None): - retry_on = ['The maximum number of mutating objects has been reached.'] + retry_on = ["The maximum number of mutating objects has been reached."] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) @@ -340,14 +343,14 @@ def found(response_code, catch_extra_error_codes=None): def find_connection(connection, module_params, vpn_connection_id=None): - ''' Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None, - or raise an error if there were multiple viable connections. ''' + """Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None, + or raise an error if there were multiple viable connections.""" - filters = module_params.get('filters') + filters = module_params.get("filters") # vpn_connection_id may be provided via module option; takes precedence over any filter values - if not vpn_connection_id and module_params.get('vpn_connection_id'): - vpn_connection_id = module_params.get('vpn_connection_id') + if not vpn_connection_id and module_params.get("vpn_connection_id"): + vpn_connection_id = module_params.get("vpn_connection_id") if not isinstance(vpn_connection_id, list) and vpn_connection_id: vpn_connection_id = [to_text(vpn_connection_id)] @@ -362,14 +365,13 @@ def find_connection(connection, module_params, vpn_connection_id=None): # see if there is a unique matching connection try: if vpn_connection_id: - existing_conn = connection.describe_vpn_connections(aws_retry=True, - VpnConnectionIds=vpn_connection_id, - Filters=formatted_filter) + existing_conn = connection.describe_vpn_connections( + aws_retry=True, VpnConnectionIds=vpn_connection_id, Filters=formatted_filter + ) else: existing_conn = connection.describe_vpn_connections(aws_retry=True, Filters=formatted_filter) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed while describing VPN connection.", - exception=e) + raise VPNConnectionException(msg="Failed while describing VPN connection.", exception=e) return find_connection_response(connections=existing_conn) @@ -377,48 +379,55 @@ def find_connection(connection, module_params, vpn_connection_id=None): def add_routes(connection, vpn_connection_id, routes_to_add): for route in routes_to_add: try: - connection.create_vpn_connection_route(aws_retry=True, - VpnConnectionId=vpn_connection_id, - DestinationCidrBlock=route) + connection.create_vpn_connection_route( + aws_retry=True, VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id), + exception=e, + ) def remove_routes(connection, vpn_connection_id, routes_to_remove): for route in routes_to_remove: try: - connection.delete_vpn_connection_route(aws_retry=True, - VpnConnectionId=vpn_connection_id, - DestinationCidrBlock=route) + connection.delete_vpn_connection_route( + aws_retry=True, VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id), + exception=e, + ) def create_filter(module_params, provided_filters): - """ Creates a filter using the user-specified parameters and unmodifiable options that may have been specified in the task """ - boto3ify_filter = {'cgw-config': 'customer-gateway-configuration', - 'static-routes-only': 'option.static-routes-only', - 'cidr': 'route.destination-cidr-block', - 'bgp': 'bgp-asn', - 'vpn': 'vpn-connection-id', - 'vgw': 'vpn-gateway-id', - 'tag-keys': 'tag-key', - 'tag-values': 'tag-value', - 'tags': 'tag', - 'cgw': 'customer-gateway-id'} + """Creates a filter using the user-specified parameters and unmodifiable options that may have been specified in the task""" + boto3ify_filter = { + "cgw-config": "customer-gateway-configuration", + "static-routes-only": "option.static-routes-only", + "cidr": "route.destination-cidr-block", + "bgp": "bgp-asn", + "vpn": "vpn-connection-id", + "vgw": "vpn-gateway-id", + "tag-keys": "tag-key", + "tag-values": "tag-value", + "tags": "tag", + "cgw": "customer-gateway-id", + } # unmodifiable options and their filter name counterpart - param_to_filter = {"customer_gateway_id": "customer-gateway-id", - "vpn_gateway_id": "vpn-gateway-id", - "vpn_connection_id": "vpn-connection-id"} + param_to_filter = { + "customer_gateway_id": "customer-gateway-id", + "vpn_gateway_id": "vpn-gateway-id", + "vpn_connection_id": "vpn-connection-id", + } flat_filter_dict = {} formatted_filter = [] for raw_param in dict(provided_filters): - # fix filter names to be recognized by boto3 if raw_param in boto3ify_filter: param = boto3ify_filter[raw_param] @@ -429,14 +438,14 @@ def create_filter(module_params, provided_filters): raise VPNConnectionException(msg="{0} is not a valid filter.".format(raw_param)) # reformat filters with special formats - if param == 'tag': + if param == "tag": for key in provided_filters[param]: - formatted_key = 'tag:' + key + formatted_key = "tag:" + key if isinstance(provided_filters[param][key], list): flat_filter_dict[formatted_key] = str(provided_filters[param][key]) else: flat_filter_dict[formatted_key] = [str(provided_filters[param][key])] - elif param == 'option.static-routes-only': + elif param == "option.static-routes-only": flat_filter_dict[param] = [str(provided_filters[param]).lower()] else: if isinstance(provided_filters[param], list): @@ -450,25 +459,25 @@ def create_filter(module_params, provided_filters): flat_filter_dict[param_to_filter[param]] = [module_params.get(param)] # change the flat dict into something boto3 will understand - formatted_filter = [{'Name': key, 'Values': value} for key, value in flat_filter_dict.items()] + formatted_filter = [{"Name": key, "Values": value} for key, value in flat_filter_dict.items()] return formatted_filter def find_connection_response(connections=None): - """ Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found, - returns None if the connection does not exist, raise an error if multiple matches are found. """ + """Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found, + returns None if the connection does not exist, raise an error if multiple matches are found.""" # Found no connections - if not connections or 'VpnConnections' not in connections: + if not connections or "VpnConnections" not in connections: return None # Too many results - elif connections and len(connections['VpnConnections']) > 1: + elif connections and len(connections["VpnConnections"]) > 1: viable = [] - for each in connections['VpnConnections']: + for each in connections["VpnConnections"]: # deleted connections are not modifiable - if each['State'] not in ("deleted", "deleting"): + if each["State"] not in ("deleted", "deleting"): viable.append(each) if len(viable) == 1: # Found one viable result; return unique match @@ -477,20 +486,31 @@ def find_connection_response(connections=None): # Found a result but it was deleted already; since there was only one viable result create a new one return None else: - raise VPNConnectionException(msg="More than one matching VPN connection was found. " - "To modify or delete a VPN please specify vpn_connection_id or add filters.") + raise VPNConnectionException( + msg="More than one matching VPN connection was found. " + "To modify or delete a VPN please specify vpn_connection_id or add filters." + ) # Found unique match - elif connections and len(connections['VpnConnections']) == 1: + elif connections and len(connections["VpnConnections"]) == 1: # deleted connections are not modifiable - if connections['VpnConnections'][0]['State'] not in ("deleted", "deleting"): - return connections['VpnConnections'][0] - - -def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type, max_attempts, delay, tunnel_options=None): - """ Creates a VPN connection """ - - options = {'StaticRoutesOnly': static_only} + if connections["VpnConnections"][0]["State"] not in ("deleted", "deleting"): + return connections["VpnConnections"][0] + + +def create_connection( + connection, + customer_gateway_id, + static_only, + vpn_gateway_id, + connection_type, + max_attempts, + delay, + tunnel_options=None, +): + """Creates a VPN connection""" + + options = {"StaticRoutesOnly": static_only} if tunnel_options and len(tunnel_options) <= 2: t_opt = [] for m in tunnel_options: @@ -500,108 +520,102 @@ def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_ raise TypeError("non-dict list member") t_opt.append(m) if t_opt: - options['TunnelOptions'] = t_opt + options["TunnelOptions"] = t_opt if not (customer_gateway_id and vpn_gateway_id): - raise VPNConnectionException(msg="No matching connection was found. To create a new connection you must provide " - "both vpn_gateway_id and customer_gateway_id.") + raise VPNConnectionException( + msg="No matching connection was found. To create a new connection you must provide " + "both vpn_gateway_id and customer_gateway_id." + ) try: - vpn = connection.create_vpn_connection(Type=connection_type, - CustomerGatewayId=customer_gateway_id, - VpnGatewayId=vpn_gateway_id, - Options=options) - connection.get_waiter('vpn_connection_available').wait( - VpnConnectionIds=[vpn['VpnConnection']['VpnConnectionId']], - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + vpn = connection.create_vpn_connection( + Type=connection_type, CustomerGatewayId=customer_gateway_id, VpnGatewayId=vpn_gateway_id, Options=options + ) + connection.get_waiter("vpn_connection_available").wait( + VpnConnectionIds=[vpn["VpnConnection"]["VpnConnectionId"]], + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, ) except WaiterError as e: - raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be available".format(vpn['VpnConnection']['VpnConnectionId']), - exception=e) + raise VPNConnectionException( + msg="Failed to wait for VPN connection {0} to be available".format(vpn["VpnConnection"]["VpnConnectionId"]), + exception=e, + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to create VPN connection", - exception=e) + raise VPNConnectionException(msg="Failed to create VPN connection", exception=e) - return vpn['VpnConnection'] + return vpn["VpnConnection"] def delete_connection(connection, vpn_connection_id, delay, max_attempts): - """ Deletes a VPN connection """ + """Deletes a VPN connection""" try: connection.delete_vpn_connection(aws_retry=True, VpnConnectionId=vpn_connection_id) - connection.get_waiter('vpn_connection_deleted').wait( - VpnConnectionIds=[vpn_connection_id], - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + connection.get_waiter("vpn_connection_deleted").wait( + VpnConnectionIds=[vpn_connection_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts} ) except WaiterError as e: - raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id), exception=e + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id), exception=e + ) def add_tags(connection, vpn_connection_id, add): try: - connection.create_tags(aws_retry=True, - Resources=[vpn_connection_id], - Tags=add) + connection.create_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=add) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add), - exception=e) + raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add), exception=e) def remove_tags(connection, vpn_connection_id, remove): # format tags since they are a list in the format ['tag1', 'tag2', 'tag3'] - key_dict_list = [{'Key': tag} for tag in remove] + key_dict_list = [{"Key": tag} for tag in remove] try: - connection.delete_tags(aws_retry=True, - Resources=[vpn_connection_id], - Tags=key_dict_list) + connection.delete_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=key_dict_list) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove), - exception=e) + raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove), exception=e) def check_for_update(connection, module_params, vpn_connection_id): - """ Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change. """ - tags = module_params.get('tags') - routes = module_params.get('routes') - purge_tags = module_params.get('purge_tags') - purge_routes = module_params.get('purge_routes') + """Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change.""" + tags = module_params.get("tags") + routes = module_params.get("routes") + purge_tags = module_params.get("purge_tags") + purge_routes = module_params.get("purge_routes") vpn_connection = find_connection(connection, module_params, vpn_connection_id=vpn_connection_id) current_attrs = camel_dict_to_snake_dict(vpn_connection) # Initialize changes dict - changes = {'tags_to_add': [], - 'tags_to_remove': [], - 'routes_to_add': [], - 'routes_to_remove': []} + changes = {"tags_to_add": [], "tags_to_remove": [], "routes_to_add": [], "routes_to_remove": []} # Get changes to tags - current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get('tags', []), u'key', u'value') + current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get("tags", []), "key", "value") if tags is None: - changes['tags_to_remove'] = [] - changes['tags_to_add'] = [] + changes["tags_to_remove"] = [] + changes["tags_to_add"] = [] else: - tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags) - changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add) + tags_to_add, changes["tags_to_remove"] = compare_aws_tags(current_tags, tags, purge_tags) + changes["tags_to_add"] = ansible_dict_to_boto3_tag_list(tags_to_add) # Get changes to routes - if 'Routes' in vpn_connection: - current_routes = [route['DestinationCidrBlock'] for route in vpn_connection['Routes']] + if "Routes" in vpn_connection: + current_routes = [route["DestinationCidrBlock"] for route in vpn_connection["Routes"]] if purge_routes: - changes['routes_to_remove'] = [old_route for old_route in current_routes if old_route not in routes] - changes['routes_to_add'] = [new_route for new_route in routes if new_route not in current_routes] + changes["routes_to_remove"] = [old_route for old_route in current_routes if old_route not in routes] + changes["routes_to_add"] = [new_route for new_route in routes if new_route not in current_routes] # Check if nonmodifiable attributes are attempted to be modified for attribute in current_attrs: if attribute in ("tags", "routes", "state"): continue - elif attribute == 'options': - will_be = module_params.get('static_only', None) - is_now = bool(current_attrs[attribute]['static_routes_only']) - attribute = 'static_only' - elif attribute == 'type': + elif attribute == "options": + will_be = module_params.get("static_only", None) + is_now = bool(current_attrs[attribute]["static_routes_only"]) + attribute = "static_only" + elif attribute == "type": will_be = module_params.get("connection_type", None) is_now = current_attrs[attribute] else: @@ -609,110 +623,116 @@ def check_for_update(connection, module_params, vpn_connection_id): will_be = module_params.get(attribute, None) if will_be is not None and to_text(will_be) != to_text(is_now): - raise VPNConnectionException(msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN " - "connection attributes are tags and routes. The value you tried to change it to " - "is {2}.".format(attribute, is_now, will_be)) + raise VPNConnectionException( + msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN " + "connection attributes are tags and routes. The value you tried to change it to " + "is {2}.".format(attribute, is_now, will_be) + ) return changes def make_changes(connection, vpn_connection_id, changes): - """ changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove', - the values of which are lists (generated by check_for_update()). + """changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove', + the values of which are lists (generated by check_for_update()). """ changed = False - if changes['tags_to_add']: + if changes["tags_to_add"]: changed = True - add_tags(connection, vpn_connection_id, changes['tags_to_add']) + add_tags(connection, vpn_connection_id, changes["tags_to_add"]) - if changes['tags_to_remove']: + if changes["tags_to_remove"]: changed = True - remove_tags(connection, vpn_connection_id, changes['tags_to_remove']) + remove_tags(connection, vpn_connection_id, changes["tags_to_remove"]) - if changes['routes_to_add']: + if changes["routes_to_add"]: changed = True - add_routes(connection, vpn_connection_id, changes['routes_to_add']) + add_routes(connection, vpn_connection_id, changes["routes_to_add"]) - if changes['routes_to_remove']: + if changes["routes_to_remove"]: changed = True - remove_routes(connection, vpn_connection_id, changes['routes_to_remove']) + remove_routes(connection, vpn_connection_id, changes["routes_to_remove"]) return changed def get_check_mode_results(connection, module_params, vpn_connection_id=None, current_state=None): - """ Returns the changes that would be made to a VPN Connection """ - state = module_params.get('state') - if state == 'absent': + """Returns the changes that would be made to a VPN Connection""" + state = module_params.get("state") + if state == "absent": if vpn_connection_id: return True, {} else: return False, {} changed = False - results = {'customer_gateway_configuration': '', - 'customer_gateway_id': module_params.get('customer_gateway_id'), - 'vpn_gateway_id': module_params.get('vpn_gateway_id'), - 'options': {'static_routes_only': module_params.get('static_only')}, - 'routes': [module_params.get('routes')]} + results = { + "customer_gateway_configuration": "", + "customer_gateway_id": module_params.get("customer_gateway_id"), + "vpn_gateway_id": module_params.get("vpn_gateway_id"), + "options": {"static_routes_only": module_params.get("static_only")}, + "routes": [module_params.get("routes")], + } # get combined current tags and tags to set - present_tags = module_params.get('tags') + present_tags = module_params.get("tags") if present_tags is None: pass - elif current_state and 'Tags' in current_state: - current_tags = boto3_tag_list_to_ansible_dict(current_state['Tags']) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get('purge_tags')) + elif current_state and "Tags" in current_state: + current_tags = boto3_tag_list_to_ansible_dict(current_state["Tags"]) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get("purge_tags")) changed |= bool(tags_to_remove) or bool(tags_to_add) - if module_params.get('purge_tags'): + if module_params.get("purge_tags"): current_tags = {} current_tags.update(present_tags) - results['tags'] = current_tags - elif module_params.get('tags'): + results["tags"] = current_tags + elif module_params.get("tags"): changed = True if present_tags: - results['tags'] = present_tags + results["tags"] = present_tags # get combined current routes and routes to add - present_routes = module_params.get('routes') - if current_state and 'Routes' in current_state: - current_routes = [route['DestinationCidrBlock'] for route in current_state['Routes']] - if module_params.get('purge_routes'): + present_routes = module_params.get("routes") + if current_state and "Routes" in current_state: + current_routes = [route["DestinationCidrBlock"] for route in current_state["Routes"]] + if module_params.get("purge_routes"): if set(current_routes) != set(present_routes): changed = True elif set(present_routes) != set(current_routes): if not set(present_routes) < set(current_routes): changed = True present_routes.extend([route for route in current_routes if route not in present_routes]) - elif module_params.get('routes'): + elif module_params.get("routes"): changed = True - results['routes'] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes] + results["routes"] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes] # return the vpn_connection_id if it's known if vpn_connection_id: - results['vpn_connection_id'] = vpn_connection_id + results["vpn_connection_id"] = vpn_connection_id else: changed = True - results['vpn_connection_id'] = 'vpn-XXXXXXXX' + results["vpn_connection_id"] = "vpn-XXXXXXXX" return changed, results def ensure_present(connection, module_params, check_mode=False): - """ Creates and adds tags to a VPN connection. If the connection already exists update tags. """ + """Creates and adds tags to a VPN connection. If the connection already exists update tags.""" vpn_connection = find_connection(connection, module_params) changed = False - delay = module_params.get('delay') - max_attempts = module_params.get('wait_timeout') // delay + delay = module_params.get("delay") + max_attempts = module_params.get("wait_timeout") // delay # No match but vpn_connection_id was specified. - if not vpn_connection and module_params.get('vpn_connection_id'): - raise VPNConnectionException(msg="There is no VPN connection available or pending with that id. Did you delete it?") + if not vpn_connection and module_params.get("vpn_connection_id"): + raise VPNConnectionException( + msg="There is no VPN connection available or pending with that id. Did you delete it?" + ) # Unique match was found. Check if attributes provided differ. elif vpn_connection: - vpn_connection_id = vpn_connection['VpnConnectionId'] + vpn_connection_id = vpn_connection["VpnConnectionId"] # check_for_update returns a dict with the keys tags_to_add, tags_to_remove, routes_to_add, routes_to_remove changes = check_for_update(connection, module_params, vpn_connection_id) if check_mode: @@ -724,38 +744,42 @@ def ensure_present(connection, module_params, check_mode=False): changed = True if check_mode: return get_check_mode_results(connection, module_params) - vpn_connection = create_connection(connection, - customer_gateway_id=module_params.get('customer_gateway_id'), - static_only=module_params.get('static_only'), - vpn_gateway_id=module_params.get('vpn_gateway_id'), - connection_type=module_params.get('connection_type'), - tunnel_options=module_params.get('tunnel_options'), - max_attempts=max_attempts, - delay=delay) - changes = check_for_update(connection, module_params, vpn_connection['VpnConnectionId']) - make_changes(connection, vpn_connection['VpnConnectionId'], changes) + vpn_connection = create_connection( + connection, + customer_gateway_id=module_params.get("customer_gateway_id"), + static_only=module_params.get("static_only"), + vpn_gateway_id=module_params.get("vpn_gateway_id"), + connection_type=module_params.get("connection_type"), + tunnel_options=module_params.get("tunnel_options"), + max_attempts=max_attempts, + delay=delay, + ) + changes = check_for_update(connection, module_params, vpn_connection["VpnConnectionId"]) + make_changes(connection, vpn_connection["VpnConnectionId"], changes) # get latest version if a change has been made and make tags output nice before returning it if vpn_connection: - vpn_connection = find_connection(connection, module_params, vpn_connection['VpnConnectionId']) - if 'Tags' in vpn_connection: - vpn_connection['Tags'] = boto3_tag_list_to_ansible_dict(vpn_connection['Tags']) + vpn_connection = find_connection(connection, module_params, vpn_connection["VpnConnectionId"]) + if "Tags" in vpn_connection: + vpn_connection["Tags"] = boto3_tag_list_to_ansible_dict(vpn_connection["Tags"]) return changed, vpn_connection def ensure_absent(connection, module_params, check_mode=False): - """ Deletes a VPN connection if it exists. """ + """Deletes a VPN connection if it exists.""" vpn_connection = find_connection(connection, module_params) if check_mode: - return get_check_mode_results(connection, module_params, vpn_connection['VpnConnectionId'] if vpn_connection else None) + return get_check_mode_results( + connection, module_params, vpn_connection["VpnConnectionId"] if vpn_connection else None + ) - delay = module_params.get('delay') - max_attempts = module_params.get('wait_timeout') // delay + delay = module_params.get("delay") + max_attempts = module_params.get("wait_timeout") // delay if vpn_connection: - delete_connection(connection, vpn_connection['VpnConnectionId'], delay=delay, max_attempts=max_attempts) + delete_connection(connection, vpn_connection["VpnConnectionId"], delay=delay, max_attempts=max_attempts) changed = True else: changed = False @@ -765,32 +789,31 @@ def ensure_absent(connection, module_params, check_mode=False): def main(): argument_spec = dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - filters=dict(type='dict', default={}), - vpn_gateway_id=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - connection_type=dict(default='ipsec.1', type='str'), - tunnel_options=dict(no_log=True, type='list', default=[], elements='dict'), - static_only=dict(default=False, type='bool'), - customer_gateway_id=dict(type='str'), - vpn_connection_id=dict(type='str'), - purge_tags=dict(type='bool', default=True), - routes=dict(type='list', default=[], elements='str'), - purge_routes=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=600), - delay=dict(type='int', default=15), + state=dict(type="str", default="present", choices=["present", "absent"]), + filters=dict(type="dict", default={}), + vpn_gateway_id=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + connection_type=dict(default="ipsec.1", type="str"), + tunnel_options=dict(no_log=True, type="list", default=[], elements="dict"), + static_only=dict(default=False, type="bool"), + customer_gateway_id=dict(type="str"), + vpn_connection_id=dict(type="str"), + purge_tags=dict(type="bool", default=True), + routes=dict(type="list", default=[], elements="str"), + purge_routes=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=600), + delay=dict(type="int", default=15), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - connection = module.client('ec2', retry_decorator=VPNRetry.jittered_backoff(retries=10)) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + connection = module.client("ec2", retry_decorator=VPNRetry.jittered_backoff(retries=10)) - state = module.params.get('state') + state = module.params.get("state") parameters = dict(module.params) try: - if state == 'present': + if state == "present": changed, response = ensure_present(connection, parameters, module.check_mode) - elif state == 'absent': + elif state == "absent": changed, response = ensure_absent(connection, parameters, module.check_mode) except VPNConnectionException as e: if e.exception: @@ -801,5 +824,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_vpc_vpn_info.py b/plugins/modules/ec2_vpc_vpn_info.py index 5070af22266..95d8a8ca7e5 100644 --- a/plugins/modules/ec2_vpc_vpn_info.py +++ b/plugins/modules/ec2_vpc_vpn_info.py @@ -175,14 +175,14 @@ def date_handler(obj): - return obj.isoformat() if hasattr(obj, 'isoformat') else obj + return obj.isoformat() if hasattr(obj, "isoformat") else obj def list_vpn_connections(connection, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - params['VpnConnectionIds'] = module.params.get('vpn_connection_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + params["VpnConnectionIds"] = module.params.get("vpn_connection_ids") try: result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler)) @@ -190,28 +190,29 @@ def list_vpn_connections(connection, module): module.fail_json_aws(e, msg="Cannot validate JSON data") except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not describe customer gateways") - snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']] + snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result["VpnConnections"]] if snaked_vpn_connections: for vpn_connection in snaked_vpn_connections: - vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', [])) + vpn_connection["tags"] = boto3_tag_list_to_ansible_dict(vpn_connection.get("tags", [])) module.exit_json(changed=False, vpn_connections=snaked_vpn_connections) def main(): - argument_spec = dict( - vpn_connection_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict') + vpn_connection_ids=dict(default=[], type="list", elements="str"), + filters=dict(default={}, type="dict"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['vpn_connection_ids', 'filters']], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[["vpn_connection_ids", "filters"]], + supports_check_mode=True, + ) - connection = module.client('ec2') + connection = module.client("ec2") list_vpn_connections(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ec2_win_password.py b/plugins/modules/ec2_win_password.py index aec9940cd30..d1553c91aae 100644 --- a/plugins/modules/ec2_win_password.py +++ b/plugins/modules/ec2_win_password.py @@ -123,40 +123,40 @@ def setup_module_object(): argument_spec = dict( instance_id=dict(required=True), - key_file=dict(required=False, default=None, type='path'), + key_file=dict(required=False, default=None, type="path"), key_passphrase=dict(no_log=True, default=None, required=False), key_data=dict(no_log=True, default=None, required=False), - wait=dict(type='bool', default=False, required=False), - wait_timeout=dict(default=120, required=False, type='int'), + wait=dict(type="bool", default=False, required=False), + wait_timeout=dict(default=120, required=False, type="int"), ) - mutually_exclusive = [['key_file', 'key_data']] + mutually_exclusive = [["key_file", "key_data"]] module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) return module def _get_password(module, client, instance_id): try: - data = client.get_password_data(aws_retry=True, InstanceId=instance_id)['PasswordData'] + data = client.get_password_data(aws_retry=True, InstanceId=instance_id)["PasswordData"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to get password data') + module.fail_json_aws(e, msg="Failed to get password data") return data def ec2_win_password(module): - instance_id = module.params.get('instance_id') - key_file = module.params.get('key_file') - if module.params.get('key_passphrase') is None: + instance_id = module.params.get("instance_id") + key_file = module.params.get("key_file") + if module.params.get("key_passphrase") is None: b_key_passphrase = None else: - b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict') - if module.params.get('key_data') is None: + b_key_passphrase = to_bytes(module.params.get("key_passphrase"), errors="surrogate_or_strict") + if module.params.get("key_data") is None: b_key_data = None else: - b_key_data = to_bytes(module.params.get('key_data'), errors='surrogate_or_strict') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + b_key_data = to_bytes(module.params.get("key_data"), errors="surrogate_or_strict") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) if wait: start = datetime.datetime.now() @@ -178,7 +178,7 @@ def ec2_win_password(module): if key_file is not None and b_key_data is None: try: - with open(key_file, 'rb') as f: + with open(key_file, "rb") as f: key = load_pem_private_key(f.read(), b_key_passphrase, default_backend()) except IOError as e: # Handle bad files @@ -198,7 +198,7 @@ def ec2_win_password(module): decrypted = None if decrypted is None: - module.fail_json(msg="unable to decrypt password", win_password='', changed=False) + module.fail_json(msg="unable to decrypt password", win_password="", changed=False) else: if wait: elapsed = datetime.datetime.now() - start @@ -211,10 +211,10 @@ def main(): module = setup_module_object() if not HAS_CRYPTOGRAPHY: - module.fail_json(msg='cryptography package required for this module.') + module.fail_json(msg="cryptography package required for this module.") ec2_win_password(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ecs_attribute.py b/plugins/modules/ecs_attribute.py index c6931fc4f53..085761b19c3 100644 --- a/plugins/modules/ecs_attribute.py +++ b/plugins/modules/ecs_attribute.py @@ -135,7 +135,7 @@ def __iter__(self): @staticmethod def _validate_attrs(attrs): - return all(tuple(attr.keys()) in (('name', 'value'), ('value', 'name')) for attr in attrs) + return all(tuple(attr.keys()) in (("name", "value"), ("value", "name")) for attr in attrs) def _parse_attrs(self, attrs): attrs_parsed = [] @@ -144,20 +144,18 @@ def _parse_attrs(self, attrs): if len(attr) != 1: self.module.fail_json(msg="Incorrect attribute format - %s" % str(attr)) name, value = list(attr.items())[0] - attrs_parsed.append({'name': name, 'value': value}) + attrs_parsed.append({"name": name, "value": value}) elif isinstance(attr, str): - attrs_parsed.append({'name': attr, 'value': None}) + attrs_parsed.append({"name": attr, "value": None}) else: self.module.fail_json(msg="Incorrect attributes format - %s" % str(attrs)) return attrs_parsed def _setup_attr_obj(self, ecs_arn, name, value=None, skip_value=False): - attr_obj = {'targetType': 'container-instance', - 'targetId': ecs_arn, - 'name': name} + attr_obj = {"targetType": "container-instance", "targetId": ecs_arn, "name": name} if not skip_value and value is not None: - attr_obj['value'] = value + attr_obj["value"] = value return attr_obj @@ -186,23 +184,25 @@ def __init__(self, module, cluster, ec2_id): self.ec2_id = ec2_id try: - self.ecs = module.client('ecs') + self.ecs = module.client("ecs") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") self.ecs_arn = self._get_ecs_arn() def _get_ecs_arn(self): try: - ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)['containerInstanceArns'] - ec2_instances = self.ecs.describe_container_instances(cluster=self.cluster, - containerInstances=ecs_instances_arns)['containerInstances'] + ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)["containerInstanceArns"] + ec2_instances = self.ecs.describe_container_instances( + cluster=self.cluster, containerInstances=ecs_instances_arns + )["containerInstances"] except (ClientError, EndpointConnectionError) as e: self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) try: - ecs_arn = next(inst for inst in ec2_instances - if inst['ec2InstanceId'] == self.ec2_id)['containerInstanceArn'] + ecs_arn = next(inst for inst in ec2_instances if inst["ec2InstanceId"] == self.ec2_id)[ + "containerInstanceArn" + ] except StopIteration: self.module.fail_json(msg="EC2 instance Id not found in ECS cluster - %s" % str(self.cluster)) @@ -211,16 +211,16 @@ def _get_ecs_arn(self): def attrs_put(self, attrs): """Puts attributes on ECS container instance""" try: - self.ecs.put_attributes(cluster=self.cluster, - attributes=attrs.get_for_ecs_arn(self.ecs_arn)) + self.ecs.put_attributes(cluster=self.cluster, attributes=attrs.get_for_ecs_arn(self.ecs_arn)) except ClientError as e: self.module.fail_json(msg=str(e)) def attrs_delete(self, attrs): """Deletes attributes from ECS container instance.""" try: - self.ecs.delete_attributes(cluster=self.cluster, - attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True)) + self.ecs.delete_attributes( + cluster=self.cluster, attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True) + ) except ClientError as e: self.module.fail_json(msg=str(e)) @@ -229,33 +229,33 @@ def attrs_get_by_name(self, attrs): Returns EcsAttributes object containing attributes from ECS container instance with names matching to attrs.attributes (EcsAttributes Object). """ - attr_objs = [{'targetType': 'container-instance', 'attributeName': attr['name']} - for attr in attrs] + attr_objs = [{"targetType": "container-instance", "attributeName": attr["name"]} for attr in attrs] try: - matched_ecs_targets = [attr_found for attr_obj in attr_objs - for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)['attributes']] + matched_ecs_targets = [ + attr_found + for attr_obj in attr_objs + for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)["attributes"] + ] except ClientError as e: self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) - matched_objs = [target for target in matched_ecs_targets - if target['targetId'] == self.ecs_arn] + matched_objs = [target for target in matched_ecs_targets if target["targetId"] == self.ecs_arn] - results = [{'name': match['name'], 'value': match.get('value', None)} - for match in matched_objs] + results = [{"name": match["name"], "value": match.get("value", None)} for match in matched_objs] return EcsAttributes(self.module, results) def main(): argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), - cluster=dict(required=True, type='str'), - ec2_instance_id=dict(required=True, type='str'), - attributes=dict(required=True, type='list', elements='dict'), + state=dict(required=False, default="present", choices=["present", "absent"]), + cluster=dict(required=True, type="str"), + ec2_instance_id=dict(required=True, type="str"), + attributes=dict(required=True, type="list", elements="dict"), ) - required_together = [['cluster', 'ec2_instance_id', 'attributes']] + required_together = [["cluster", "ec2_instance_id", "attributes"]] module = AnsibleAWSModule( argument_spec=argument_spec, @@ -263,39 +263,43 @@ def main(): required_together=required_together, ) - cluster = module.params['cluster'] - ec2_instance_id = module.params['ec2_instance_id'] - attributes = module.params['attributes'] + cluster = module.params["cluster"] + ec2_instance_id = module.params["ec2_instance_id"] + attributes = module.params["attributes"] conti = Ec2EcsInstance(module, cluster, ec2_instance_id) attrs = EcsAttributes(module, attributes) - results = {'changed': False, - 'attributes': [ - {'cluster': cluster, - 'ec2_instance_id': ec2_instance_id, - 'attributes': attributes} - ]} + results = { + "changed": False, + "attributes": [ + { + "cluster": cluster, + "ec2_instance_id": ec2_instance_id, + "attributes": attributes, + } + ], + } attrs_present = conti.attrs_get_by_name(attrs) - if module.params['state'] == 'present': + if module.params["state"] == "present": attrs_diff = attrs.diff(attrs_present) if not attrs_diff: module.exit_json(**results) conti.attrs_put(attrs_diff) - results['changed'] = True + results["changed"] = True - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": if not attrs_present: module.exit_json(**results) conti.attrs_delete(attrs_present) - results['changed'] = True + results["changed"] = True module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ecs_cluster.py b/plugins/modules/ecs_cluster.py index 7520cd0abc9..e627cd98f1b 100644 --- a/plugins/modules/ecs_cluster.py +++ b/plugins/modules/ecs_cluster.py @@ -182,27 +182,25 @@ class EcsClusterManager: def __init__(self, module): self.module = module try: - self.ecs = module.client('ecs') + self.ecs = module.client("ecs") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'): + def find_in_array(self, array_of_clusters, cluster_name, field_name="clusterArn"): for c in array_of_clusters: if c[field_name].endswith(cluster_name): return c return None def describe_cluster(self, cluster_name): - response = self.ecs.describe_clusters(clusters=[ - cluster_name - ]) - if len(response['failures']) > 0: - c = self.find_in_array(response['failures'], cluster_name, 'arn') - if c and c['reason'] == 'MISSING': + response = self.ecs.describe_clusters(clusters=[cluster_name]) + if len(response["failures"]) > 0: + c = self.find_in_array(response["failures"], cluster_name, "arn") + if c and c["reason"] == "MISSING": return None # fall thru and look through found ones - if len(response['clusters']) > 0: - c = self.find_in_array(response['clusters'], cluster_name) + if len(response["clusters"]) > 0: + c = self.find_in_array(response["clusters"], cluster_name) if c: return c raise Exception("Unknown problem describing cluster %s." % cluster_name) @@ -210,48 +208,49 @@ def describe_cluster(self, cluster_name): def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): params = dict(clusterName=cluster_name) if capacity_providers: - params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + params["capacityProviders"] = snake_dict_to_camel_dict(capacity_providers) if capacity_provider_strategy: - params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + params["defaultCapacityProviderStrategy"] = snake_dict_to_camel_dict(capacity_provider_strategy) response = self.ecs.create_cluster(**params) - return response['cluster'] + return response["cluster"] def update_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): params = dict(cluster=cluster_name) if capacity_providers: - params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + params["capacityProviders"] = snake_dict_to_camel_dict(capacity_providers) else: - params['capacityProviders'] = [] + params["capacityProviders"] = [] if capacity_provider_strategy: - params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + params["defaultCapacityProviderStrategy"] = snake_dict_to_camel_dict(capacity_provider_strategy) else: - params['defaultCapacityProviderStrategy'] = [] + params["defaultCapacityProviderStrategy"] = [] response = self.ecs.put_cluster_capacity_providers(**params) - return response['cluster'] + return response["cluster"] def delete_cluster(self, clusterName): return self.ecs.delete_cluster(cluster=clusterName) def main(): - argument_spec = dict( - state=dict(required=True, choices=['present', 'absent', 'has_instances']), - name=dict(required=True, type='str'), - delay=dict(required=False, type='int', default=10), - repeat=dict(required=False, type='int', default=10), - purge_capacity_providers=dict(required=False, type='bool', default=False), - capacity_providers=dict(required=False, type='list', elements='str'), - capacity_provider_strategy=dict(required=False, - type='list', - elements='dict', - options=dict(capacity_provider=dict(type='str'), - weight=dict(type='int'), - base=dict(type='int', default=0) - ) - ), + state=dict(required=True, choices=["present", "absent", "has_instances"]), + name=dict(required=True, type="str"), + delay=dict(required=False, type="int", default=10), + repeat=dict(required=False, type="int", default=10), + purge_capacity_providers=dict(required=False, type="bool", default=False), + capacity_providers=dict(required=False, type="list", elements="str"), + capacity_provider_strategy=dict( + required=False, + type="list", + elements="dict", + options=dict( + capacity_provider=dict(type="str"), + weight=dict(type="int"), + base=dict(type="int", default=0), + ), + ), ) - required_together = [['state', 'name']] + required_together = [["state", "name"]] module = AnsibleAWSModule( argument_spec=argument_spec, @@ -261,19 +260,19 @@ def main(): cluster_mgr = EcsClusterManager(module) try: - existing = cluster_mgr.describe_cluster(module.params['name']) + existing = cluster_mgr.describe_cluster(module.params["name"]) except Exception as e: - module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e)) + module.fail_json(msg="Exception describing cluster '" + module.params["name"] + "': " + str(e)) results = dict(changed=False) - if module.params['state'] == 'present': + if module.params["state"] == "present": # Pull requested and existing capacity providers and strategies. - purge_capacity_providers = module.params['purge_capacity_providers'] - requested_cp = module.params['capacity_providers'] - requested_cps = module.params['capacity_provider_strategy'] - if existing and 'status' in existing and existing['status'] == "ACTIVE": - existing_cp = existing['capacityProviders'] - existing_cps = existing['defaultCapacityProviderStrategy'] + purge_capacity_providers = module.params["purge_capacity_providers"] + requested_cp = module.params["capacity_providers"] + requested_cps = module.params["capacity_provider_strategy"] + if existing and "status" in existing and existing["status"] == "ACTIVE": + existing_cp = existing["capacityProviders"] + existing_cps = existing["defaultCapacityProviderStrategy"] if requested_cp is None: requested_cp = [] @@ -292,9 +291,12 @@ def main(): # Unless purge_capacity_providers is true, we will not be updating the providers or strategy. if not purge_capacity_providers: - module.deprecate('After 2024-06-01 the default value of purge_capacity_providers will change from false to true.' - ' To maintain the existing behaviour explicitly set purge_capacity_providers=true', - date='2024-06-01', collection_name='community.aws') + module.deprecate( + "After 2024-06-01 the default value of purge_capacity_providers will change from false to true." + " To maintain the existing behaviour explicitly set purge_capacity_providers=true", + date="2024-06-01", + collection_name="community.aws", + ) cps_update_needed = False requested_cp = existing_cp requested_cps = existing_cps @@ -302,57 +304,67 @@ def main(): # If either the providers or strategy differ, update the cluster. if requested_cp != existing_cp or cps_update_needed: if not module.check_mode: - results['cluster'] = cluster_mgr.update_cluster(cluster_name=module.params['name'], - capacity_providers=requested_cp, - capacity_provider_strategy=requested_cps) - results['changed'] = True + results["cluster"] = cluster_mgr.update_cluster( + cluster_name=module.params["name"], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps, + ) + results["changed"] = True else: - results['cluster'] = existing + results["cluster"] = existing else: if not module.check_mode: # doesn't exist. create it. - results['cluster'] = cluster_mgr.create_cluster(cluster_name=module.params['name'], - capacity_providers=requested_cp, - capacity_provider_strategy=requested_cps) - results['changed'] = True + results["cluster"] = cluster_mgr.create_cluster( + cluster_name=module.params["name"], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps, + ) + results["changed"] = True # delete the cluster - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": if not existing: pass else: # it exists, so we should delete it and mark changed. # return info about the cluster deleted - results['cluster'] = existing - if 'status' in existing and existing['status'] == "INACTIVE": - results['changed'] = False + results["cluster"] = existing + if "status" in existing and existing["status"] == "INACTIVE": + results["changed"] = False else: if not module.check_mode: - cluster_mgr.delete_cluster(module.params['name']) - results['changed'] = True - elif module.params['state'] == 'has_instances': + cluster_mgr.delete_cluster(module.params["name"]) + results["changed"] = True + elif module.params["state"] == "has_instances": if not existing: - module.fail_json(msg="Cluster '" + module.params['name'] + " not found.") + module.fail_json(msg="Cluster '" + module.params["name"] + " not found.") return # it exists, so we should delete it and mark changed. # return info about the cluster deleted - delay = module.params['delay'] - repeat = module.params['repeat'] + delay = module.params["delay"] + repeat = module.params["repeat"] time.sleep(delay) count = 0 for i in range(repeat): - existing = cluster_mgr.describe_cluster(module.params['name']) - count = existing['registeredContainerInstancesCount'] + existing = cluster_mgr.describe_cluster(module.params["name"]) + count = existing["registeredContainerInstancesCount"] if count > 0: - results['changed'] = True + results["changed"] = True break time.sleep(delay) if count == 0 and i is repeat - 1: - module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.") + module.fail_json( + msg="Cluster instance count still zero after " + + str(repeat) + + " tries of " + + str(delay) + + " seconds each." + ) return module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ecs_ecr.py b/plugins/modules/ecs_ecr.py index 4b5ce1ebe75..1e6efd7b331 100644 --- a/plugins/modules/ecs_ecr.py +++ b/plugins/modules/ecs_ecr.py @@ -245,45 +245,46 @@ def build_kwargs(registry_id): class EcsEcr: def __init__(self, module): - self.ecr = module.client('ecr') - self.sts = module.client('sts') + self.ecr = module.client("ecr") + self.sts = module.client("sts") self.check_mode = module.check_mode self.changed = False self.skipped = False def get_repository(self, registry_id, name): try: - res = self.ecr.describe_repositories( - repositoryNames=[name], **build_kwargs(registry_id)) - repos = res.get('repositories') + res = self.ecr.describe_repositories(repositoryNames=[name], **build_kwargs(registry_id)) + repos = res.get("repositories") return repos and repos[0] - except is_boto3_error_code('RepositoryNotFoundException'): + except is_boto3_error_code("RepositoryNotFoundException"): return None def get_repository_policy(self, registry_id, name): try: - res = self.ecr.get_repository_policy( - repositoryName=name, **build_kwargs(registry_id)) - text = res.get('policyText') + res = self.ecr.get_repository_policy(repositoryName=name, **build_kwargs(registry_id)) + text = res.get("policyText") return text and json.loads(text) - except is_boto3_error_code(['RepositoryNotFoundException', 'RepositoryPolicyNotFoundException']): + except is_boto3_error_code(["RepositoryNotFoundException", "RepositoryPolicyNotFoundException"]): return None def create_repository(self, registry_id, name, image_tag_mutability, encryption_configuration): if registry_id: - default_registry_id = self.sts.get_caller_identity().get('Account') + default_registry_id = self.sts.get_caller_identity().get("Account") if registry_id != default_registry_id: - raise Exception('Cannot create repository in registry {0}.' - 'Would be created in {1} instead.'.format(registry_id, default_registry_id)) + raise Exception( + "Cannot create repository in registry {0}." + "Would be created in {1} instead.".format(registry_id, default_registry_id) + ) if encryption_configuration is None: - encryption_configuration = dict(encryptionType='AES256') + encryption_configuration = dict(encryptionType="AES256") if not self.check_mode: repo = self.ecr.create_repository( repositoryName=name, imageTagMutability=image_tag_mutability, - encryptionConfiguration=encryption_configuration).get('repository') + encryptionConfiguration=encryption_configuration, + ).get("repository") self.changed = True return repo else: @@ -293,10 +294,8 @@ def create_repository(self, registry_id, name, image_tag_mutability, encryption_ def set_repository_policy(self, registry_id, name, policy_text, force): if not self.check_mode: policy = self.ecr.set_repository_policy( - repositoryName=name, - policyText=policy_text, - force=force, - **build_kwargs(registry_id)) + repositoryName=name, policyText=policy_text, force=force, **build_kwargs(registry_id) + ) self.changed = True return policy else: @@ -304,15 +303,13 @@ def set_repository_policy(self, registry_id, name, policy_text, force): if self.get_repository(registry_id, name) is None: printable = name if registry_id: - printable = '{0}:{1}'.format(registry_id, name) - raise Exception( - 'could not find repository {0}'.format(printable)) + printable = "{0}:{1}".format(registry_id, name) + raise Exception("could not find repository {0}".format(printable)) return def delete_repository(self, registry_id, name, force): if not self.check_mode: - repo = self.ecr.delete_repository( - repositoryName=name, force=force, **build_kwargs(registry_id)) + repo = self.ecr.delete_repository(repositoryName=name, force=force, **build_kwargs(registry_id)) self.changed = True return repo else: @@ -324,8 +321,7 @@ def delete_repository(self, registry_id, name, force): def delete_repository_policy(self, registry_id, name): if not self.check_mode: - policy = self.ecr.delete_repository_policy( - repositoryName=name, **build_kwargs(registry_id)) + policy = self.ecr.delete_repository_policy(repositoryName=name, **build_kwargs(registry_id)) self.changed = True return policy else: @@ -337,36 +333,33 @@ def delete_repository_policy(self, registry_id, name): def put_image_tag_mutability(self, registry_id, name, new_mutability_configuration): repo = self.get_repository(registry_id, name) - current_mutability_configuration = repo.get('imageTagMutability') + current_mutability_configuration = repo.get("imageTagMutability") if current_mutability_configuration != new_mutability_configuration: if not self.check_mode: self.ecr.put_image_tag_mutability( - repositoryName=name, - imageTagMutability=new_mutability_configuration, - **build_kwargs(registry_id)) + repositoryName=name, imageTagMutability=new_mutability_configuration, **build_kwargs(registry_id) + ) else: self.skipped = True self.changed = True - repo['imageTagMutability'] = new_mutability_configuration + repo["imageTagMutability"] = new_mutability_configuration return repo def get_lifecycle_policy(self, registry_id, name): try: - res = self.ecr.get_lifecycle_policy( - repositoryName=name, **build_kwargs(registry_id)) - text = res.get('lifecyclePolicyText') + res = self.ecr.get_lifecycle_policy(repositoryName=name, **build_kwargs(registry_id)) + text = res.get("lifecyclePolicyText") return text and json.loads(text) - except is_boto3_error_code(['LifecyclePolicyNotFoundException', 'RepositoryNotFoundException']): + except is_boto3_error_code(["LifecyclePolicyNotFoundException", "RepositoryNotFoundException"]): return None def put_lifecycle_policy(self, registry_id, name, policy_text): if not self.check_mode: policy = self.ecr.put_lifecycle_policy( - repositoryName=name, - lifecyclePolicyText=policy_text, - **build_kwargs(registry_id)) + repositoryName=name, lifecyclePolicyText=policy_text, **build_kwargs(registry_id) + ) self.changed = True return policy else: @@ -374,15 +367,13 @@ def put_lifecycle_policy(self, registry_id, name, policy_text): if self.get_repository(registry_id, name) is None: printable = name if registry_id: - printable = '{0}:{1}'.format(registry_id, name) - raise Exception( - 'could not find repository {0}'.format(printable)) + printable = "{0}:{1}".format(registry_id, name) + raise Exception("could not find repository {0}".format(printable)) return def purge_lifecycle_policy(self, registry_id, name): if not self.check_mode: - policy = self.ecr.delete_lifecycle_policy( - repositoryName=name, **build_kwargs(registry_id)) + policy = self.ecr.delete_lifecycle_policy(repositoryName=name, **build_kwargs(registry_id)) self.changed = True return policy else: @@ -396,14 +387,11 @@ def put_image_scanning_configuration(self, registry_id, name, scan_on_push): if not self.check_mode: if registry_id: scan = self.ecr.put_image_scanning_configuration( - registryId=registry_id, - repositoryName=name, - imageScanningConfiguration={'scanOnPush': scan_on_push} + registryId=registry_id, repositoryName=name, imageScanningConfiguration={"scanOnPush": scan_on_push} ) else: scan = self.ecr.put_image_scanning_configuration( - repositoryName=name, - imageScanningConfiguration={'scanOnPush': scan_on_push} + repositoryName=name, imageScanningConfiguration={"scanOnPush": scan_on_push} ) self.changed = True return scan @@ -413,11 +401,11 @@ def put_image_scanning_configuration(self, registry_id, name, scan_on_push): def sort_lists_of_strings(policy): - for statement_index in range(0, len(policy.get('Statement', []))): - for key in policy['Statement'][statement_index]: - value = policy['Statement'][statement_index][key] + for statement_index in range(0, len(policy.get("Statement", []))): + for key in policy["Statement"][statement_index]: + value = policy["Statement"][statement_index][key] if isinstance(value, list) and all(isinstance(item, string_types) for item in value): - policy['Statement'][statement_index][key] = sorted(value) + policy["Statement"][statement_index][key] = sorted(value) return policy @@ -425,145 +413,138 @@ def run(ecr, params): # type: (EcsEcr, dict, int) -> Tuple[bool, dict] result = {} try: - name = params['name'] - state = params['state'] - policy_text = params['policy'] - purge_policy = params['purge_policy'] - force_absent = params['force_absent'] - registry_id = params['registry_id'] - force_set_policy = params['force_set_policy'] - image_tag_mutability = params['image_tag_mutability'].upper() - lifecycle_policy_text = params['lifecycle_policy'] - purge_lifecycle_policy = params['purge_lifecycle_policy'] - scan_on_push = params['scan_on_push'] - encryption_configuration = snake_dict_to_camel_dict(params['encryption_configuration']) + name = params["name"] + state = params["state"] + policy_text = params["policy"] + purge_policy = params["purge_policy"] + force_absent = params["force_absent"] + registry_id = params["registry_id"] + force_set_policy = params["force_set_policy"] + image_tag_mutability = params["image_tag_mutability"].upper() + lifecycle_policy_text = params["lifecycle_policy"] + purge_lifecycle_policy = params["purge_lifecycle_policy"] + scan_on_push = params["scan_on_push"] + encryption_configuration = snake_dict_to_camel_dict(params["encryption_configuration"]) # Parse policies, if they are given try: policy = policy_text and json.loads(policy_text) except ValueError: - result['policy'] = policy_text - result['msg'] = 'Could not parse policy' + result["policy"] = policy_text + result["msg"] = "Could not parse policy" return False, result try: - lifecycle_policy = \ - lifecycle_policy_text and json.loads(lifecycle_policy_text) + lifecycle_policy = lifecycle_policy_text and json.loads(lifecycle_policy_text) except ValueError: - result['lifecycle_policy'] = lifecycle_policy_text - result['msg'] = 'Could not parse lifecycle_policy' + result["lifecycle_policy"] = lifecycle_policy_text + result["msg"] = "Could not parse lifecycle_policy" return False, result - result['state'] = state - result['created'] = False + result["state"] = state + result["created"] = False repo = ecr.get_repository(registry_id, name) - if state == 'present': - result['created'] = False + if state == "present": + result["created"] = False if not repo: - repo = ecr.create_repository( - registry_id, name, image_tag_mutability, encryption_configuration) - result['changed'] = True - result['created'] = True + repo = ecr.create_repository(registry_id, name, image_tag_mutability, encryption_configuration) + result["changed"] = True + result["created"] = True else: if encryption_configuration is not None: - if repo.get('encryptionConfiguration') != encryption_configuration: - result['msg'] = 'Cannot modify repository encryption type' + if repo.get("encryptionConfiguration") != encryption_configuration: + result["msg"] = "Cannot modify repository encryption type" return False, result repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability) - result['repository'] = repo + result["repository"] = repo if purge_lifecycle_policy: - original_lifecycle_policy = \ - ecr.get_lifecycle_policy(registry_id, name) + original_lifecycle_policy = ecr.get_lifecycle_policy(registry_id, name) - result['lifecycle_policy'] = None + result["lifecycle_policy"] = None if original_lifecycle_policy: ecr.purge_lifecycle_policy(registry_id, name) - result['changed'] = True + result["changed"] = True elif lifecycle_policy_text is not None: try: - result['lifecycle_policy'] = lifecycle_policy - original_lifecycle_policy = ecr.get_lifecycle_policy( - registry_id, name) + result["lifecycle_policy"] = lifecycle_policy + original_lifecycle_policy = ecr.get_lifecycle_policy(registry_id, name) if compare_policies(original_lifecycle_policy, lifecycle_policy): - ecr.put_lifecycle_policy(registry_id, name, - lifecycle_policy_text) - result['changed'] = True + ecr.put_lifecycle_policy(registry_id, name, lifecycle_policy_text) + result["changed"] = True except Exception: # Some failure w/ the policy. It's helpful to know what the # policy is. - result['lifecycle_policy'] = lifecycle_policy_text + result["lifecycle_policy"] = lifecycle_policy_text raise if purge_policy: original_policy = ecr.get_repository_policy(registry_id, name) - result['policy'] = None + result["policy"] = None if original_policy: ecr.delete_repository_policy(registry_id, name) - result['changed'] = True + result["changed"] = True elif policy_text is not None: try: # Sort any lists containing only string types policy = sort_lists_of_strings(policy) - result['policy'] = policy + result["policy"] = policy - original_policy = ecr.get_repository_policy( - registry_id, name) + original_policy = ecr.get_repository_policy(registry_id, name) if original_policy: original_policy = sort_lists_of_strings(original_policy) if compare_policies(original_policy, policy): - ecr.set_repository_policy( - registry_id, name, policy_text, force_set_policy) - result['changed'] = True + ecr.set_repository_policy(registry_id, name, policy_text, force_set_policy) + result["changed"] = True except Exception: # Some failure w/ the policy. It's helpful to know what the # policy is. - result['policy'] = policy_text + result["policy"] = policy_text raise else: original_policy = ecr.get_repository_policy(registry_id, name) if original_policy: - result['policy'] = original_policy + result["policy"] = original_policy original_scan_on_push = ecr.get_repository(registry_id, name) if original_scan_on_push is not None: - if scan_on_push != original_scan_on_push['imageScanningConfiguration']['scanOnPush']: - result['changed'] = True - result['repository']['imageScanningConfiguration']['scanOnPush'] = scan_on_push + if scan_on_push != original_scan_on_push["imageScanningConfiguration"]["scanOnPush"]: + result["changed"] = True + result["repository"]["imageScanningConfiguration"]["scanOnPush"] = scan_on_push response = ecr.put_image_scanning_configuration(registry_id, name, scan_on_push) - elif state == 'absent': - result['name'] = name + elif state == "absent": + result["name"] = name if repo: ecr.delete_repository(registry_id, name, force_absent) - result['changed'] = True + result["changed"] = True except Exception as err: msg = str(err) if isinstance(err, botocore.exceptions.ClientError): msg = boto_exception(err) - result['msg'] = msg - result['exception'] = traceback.format_exc() + result["msg"] = msg + result["exception"] = traceback.format_exc() return False, result if ecr.skipped: - result['skipped'] = True + result["skipped"] = True if ecr.changed: - result['changed'] = True + result["changed"] = True return True, result @@ -572,34 +553,37 @@ def main(): argument_spec = dict( name=dict(required=True), registry_id=dict(required=False), - state=dict(required=False, choices=['present', 'absent'], - default='present'), - force_absent=dict(required=False, type='bool', default=False), - force_set_policy=dict(required=False, type='bool', default=False), - policy=dict(required=False, type='json'), - image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'], - default='mutable'), - purge_policy=dict(required=False, type='bool'), - lifecycle_policy=dict(required=False, type='json'), - purge_lifecycle_policy=dict(required=False, type='bool'), - scan_on_push=(dict(required=False, type='bool', default=False)), + state=dict(required=False, choices=["present", "absent"], default="present"), + force_absent=dict(required=False, type="bool", default=False), + force_set_policy=dict(required=False, type="bool", default=False), + policy=dict(required=False, type="json"), + image_tag_mutability=dict(required=False, choices=["mutable", "immutable"], default="mutable"), + purge_policy=dict(required=False, type="bool"), + lifecycle_policy=dict(required=False, type="json"), + purge_lifecycle_policy=dict(required=False, type="bool"), + scan_on_push=(dict(required=False, type="bool", default=False)), encryption_configuration=dict( required=False, - type='dict', + type="dict", options=dict( - encryption_type=dict(required=False, type='str', default='AES256', choices=['AES256', 'KMS']), - kms_key=dict(required=False, type='str', no_log=False), + encryption_type=dict(required=False, type="str", default="AES256", choices=["AES256", "KMS"]), + kms_key=dict(required=False, type="str", no_log=False), ), required_if=[ - ['encryption_type', 'KMS', ['kms_key']], + ["encryption_type", "KMS", ["kms_key"]], ], ), ) mutually_exclusive = [ - ['policy', 'purge_policy'], - ['lifecycle_policy', 'purge_lifecycle_policy']] - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive) + ["policy", "purge_policy"], + ["lifecycle_policy", "purge_lifecycle_policy"], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + ) ecr = EcsEcr(module) passed, result = run(ecr, module.params) @@ -610,5 +594,5 @@ def main(): module.fail_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py index 2009dc3b54a..af5ad567dc8 100644 --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -697,13 +697,13 @@ DEPLOYMENT_CONTROLLER_TYPE_MAP = { - 'type': 'str', + "type": "str", } DEPLOYMENT_CONFIGURATION_TYPE_MAP = { - 'maximum_percent': 'int', - 'minimum_healthy_percent': 'int', - 'deployment_circuit_breaker': 'dict', + "maximum_percent": "int", + "minimum_healthy_percent": "int", + "deployment_circuit_breaker": "dict", } @@ -712,32 +712,32 @@ class EcsServiceManager: def __init__(self, module): self.module = module - self.ecs = module.client('ecs') - self.ec2 = module.client('ec2') + self.ecs = module.client("ecs") + self.ec2 = module.client("ec2") def format_network_configuration(self, network_config): result = dict() - if network_config['subnets'] is not None: - result['subnets'] = network_config['subnets'] + if network_config["subnets"] is not None: + result["subnets"] = network_config["subnets"] else: self.module.fail_json(msg="Network configuration must include subnets") - if network_config['security_groups'] is not None: - groups = network_config['security_groups'] - if any(not sg.startswith('sg-') for sg in groups): + if network_config["security_groups"] is not None: + groups = network_config["security_groups"] + if any(not sg.startswith("sg-") for sg in groups): try: - vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId'] + vpc_id = self.ec2.describe_subnets(SubnetIds=[result["subnets"][0]])["Subnets"][0]["VpcId"] groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't look up security groups") - result['securityGroups'] = groups - if network_config['assign_public_ip'] is not None: - if network_config['assign_public_ip'] is True: - result['assignPublicIp'] = "ENABLED" + result["securityGroups"] = groups + if network_config["assign_public_ip"] is not None: + if network_config["assign_public_ip"] is True: + result["assignPublicIp"] = "ENABLED" else: - result['assignPublicIp'] = "DISABLED" + result["assignPublicIp"] = "DISABLED" return dict(awsvpcConfiguration=result) - def find_in_array(self, array_of_services, service_name, field_name='serviceArn'): + def find_in_array(self, array_of_services, service_name, field_name="serviceArn"): for c in array_of_services: if c[field_name].endswith(service_name): return c @@ -747,18 +747,18 @@ def describe_service(self, cluster_name, service_name): response = self.ecs.describe_services( cluster=cluster_name, services=[service_name], - include=['TAGS'], + include=["TAGS"], ) - msg = '' + msg = "" - if len(response['failures']) > 0: - c = self.find_in_array(response['failures'], service_name, 'arn') - msg += ", failure reason is " + c['reason'] - if c and c['reason'] == 'MISSING': + if len(response["failures"]) > 0: + c = self.find_in_array(response["failures"], service_name, "arn") + msg += ", failure reason is " + c["reason"] + if c and c["reason"] == "MISSING": return None # fall thru and look through found ones - if len(response['services']) > 0: - c = self.find_in_array(response['services'], service_name) + if len(response["services"]) > 0: + c = self.find_in_array(response["services"], service_name) if c: return c raise Exception("Unknown problem describing service %s." % service_name) @@ -768,21 +768,21 @@ def is_matching_service(self, expected, existing): # arn:aws:ecs:eu-central-1:123456789:task-definition/ansible-fargate-nginx:3 # but the user is just entering # ansible-fargate-nginx:3 - if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]: - if existing.get('deploymentController', {}).get('type', None) != 'CODE_DEPLOY': + if expected["task_definition"] != existing["taskDefinition"].split("/")[-1]: + if existing.get("deploymentController", {}).get("type", None) != "CODE_DEPLOY": return False - if expected.get('health_check_grace_period_seconds'): - if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'): + if expected.get("health_check_grace_period_seconds"): + if expected.get("health_check_grace_period_seconds") != existing.get("healthCheckGracePeriodSeconds"): return False - if (expected['load_balancers'] or []) != existing['loadBalancers']: + if (expected["load_balancers"] or []) != existing["loadBalancers"]: return False - if (expected['propagate_tags'] or "NONE") != existing['propagateTags']: + if (expected["propagate_tags"] or "NONE") != existing["propagateTags"]: return False - if boto3_tag_list_to_ansible_dict(existing.get('tags', [])) != (expected['tags'] or {}): + if boto3_tag_list_to_ansible_dict(existing.get("tags", [])) != (expected["tags"] or {}): return False if (expected["enable_execute_command"] or False) != existing.get("enableExecuteCommand", False): @@ -790,8 +790,8 @@ def is_matching_service(self, expected, existing): # expected is params. DAEMON scheduling strategy returns desired count equal to # number of instances running; don't check desired count if scheduling strat is daemon - if (expected['scheduling_strategy'] != 'DAEMON'): - if (expected['desired_count'] or 0) != existing['desiredCount']: + if expected["scheduling_strategy"] != "DAEMON": + if (expected["desired_count"] or 0) != existing["desiredCount"]: return False return True @@ -820,7 +820,6 @@ def create_service( propagate_tags, enable_execute_command, ): - params = dict( cluster=cluster_name, serviceName=service_name, @@ -829,47 +828,49 @@ def create_service( clientToken=client_token, role=role, deploymentConfiguration=deployment_configuration, - placementStrategy=placement_strategy + placementStrategy=placement_strategy, ) if network_configuration: - params['networkConfiguration'] = network_configuration + params["networkConfiguration"] = network_configuration if deployment_controller: - params['deploymentController'] = deployment_controller + params["deploymentController"] = deployment_controller if launch_type: - params['launchType'] = launch_type + params["launchType"] = launch_type if platform_version: - params['platformVersion'] = platform_version + params["platformVersion"] = platform_version if self.health_check_setable(params) and health_check_grace_period_seconds is not None: - params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds + params["healthCheckGracePeriodSeconds"] = health_check_grace_period_seconds if service_registries: - params['serviceRegistries'] = service_registries + params["serviceRegistries"] = service_registries # filter placement_constraint and left only those where value is not None # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation if placement_constraints: - params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None} - for constraint in placement_constraints] + params["placementConstraints"] = [ + {key: value for key, value in constraint.items() if value is not None} + for constraint in placement_constraints + ] # desired count is not required if scheduling strategy is daemon if desired_count is not None: - params['desiredCount'] = desired_count + params["desiredCount"] = desired_count if capacity_provider_strategy: - params['capacityProviderStrategy'] = capacity_provider_strategy + params["capacityProviderStrategy"] = capacity_provider_strategy if propagate_tags: - params['propagateTags'] = propagate_tags + params["propagateTags"] = propagate_tags # desired count is not required if scheduling strategy is daemon if desired_count is not None: - params['desiredCount'] = desired_count + params["desiredCount"] = desired_count if tags: - params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + params["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value") if scheduling_strategy: - params['schedulingStrategy'] = scheduling_strategy + params["schedulingStrategy"] = scheduling_strategy if enable_execute_command: params["enableExecuteCommand"] = enable_execute_command response = self.ecs.create_service(**params) - return self.jsonize(response['service']) + return self.jsonize(response["service"]) def update_service( self, @@ -893,242 +894,264 @@ def update_service( cluster=cluster_name, service=service_name, taskDefinition=task_definition, - deploymentConfiguration=deployment_configuration) + deploymentConfiguration=deployment_configuration, + ) # filter placement_constraint and left only those where value is not None # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation if placement_constraints: - params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None} - for constraint in placement_constraints] + params["placementConstraints"] = [ + {key: value for key, value in constraint.items() if value is not None} + for constraint in placement_constraints + ] if purge_placement_constraints and not placement_constraints: - params['placementConstraints'] = [] + params["placementConstraints"] = [] if placement_strategy: - params['placementStrategy'] = placement_strategy + params["placementStrategy"] = placement_strategy if purge_placement_strategy and not placement_strategy: - params['placementStrategy'] = [] + params["placementStrategy"] = [] if network_configuration: - params['networkConfiguration'] = network_configuration + params["networkConfiguration"] = network_configuration if force_new_deployment: - params['forceNewDeployment'] = force_new_deployment + params["forceNewDeployment"] = force_new_deployment if capacity_provider_strategy: - params['capacityProviderStrategy'] = capacity_provider_strategy + params["capacityProviderStrategy"] = capacity_provider_strategy if health_check_grace_period_seconds is not None: - params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds + params["healthCheckGracePeriodSeconds"] = health_check_grace_period_seconds # desired count is not required if scheduling strategy is daemon if desired_count is not None: - params['desiredCount'] = desired_count + params["desiredCount"] = desired_count if enable_execute_command is not None: params["enableExecuteCommand"] = enable_execute_command if load_balancers: - params['loadBalancers'] = load_balancers + params["loadBalancers"] = load_balancers response = self.ecs.update_service(**params) - return self.jsonize(response['service']) + return self.jsonize(response["service"]) def jsonize(self, service): # some fields are datetime which is not JSON serializable # make them strings - if 'createdAt' in service: - service['createdAt'] = str(service['createdAt']) - if 'deployments' in service: - for d in service['deployments']: - if 'createdAt' in d: - d['createdAt'] = str(d['createdAt']) - if 'updatedAt' in d: - d['updatedAt'] = str(d['updatedAt']) - if 'events' in service: - for e in service['events']: - if 'createdAt' in e: - e['createdAt'] = str(e['createdAt']) + if "createdAt" in service: + service["createdAt"] = str(service["createdAt"]) + if "deployments" in service: + for d in service["deployments"]: + if "createdAt" in d: + d["createdAt"] = str(d["createdAt"]) + if "updatedAt" in d: + d["updatedAt"] = str(d["updatedAt"]) + if "events" in service: + for e in service["events"]: + if "createdAt" in e: + e["createdAt"] = str(e["createdAt"]) return service def delete_service(self, service, cluster=None, force=False): return self.ecs.delete_service(cluster=cluster, service=service, force=force) def health_check_setable(self, params): - load_balancers = params.get('loadBalancers', []) + load_balancers = params.get("loadBalancers", []) return len(load_balancers) > 0 def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent', 'deleting']), - name=dict(required=True, type='str', aliases=['service']), - cluster=dict(required=False, type='str', default='default'), - task_definition=dict(required=False, type='str'), - load_balancers=dict(required=False, default=[], type='list', elements='dict'), - desired_count=dict(required=False, type='int'), - client_token=dict(required=False, default='', type='str', no_log=False), - role=dict(required=False, default='', type='str'), - delay=dict(required=False, type='int', default=10), - repeat=dict(required=False, type='int', default=10), - force_new_deployment=dict(required=False, default=False, type='bool'), - force_deletion=dict(required=False, default=False, type='bool'), - deployment_controller=dict(required=False, default={}, type='dict'), - deployment_configuration=dict(required=False, default={}, type='dict'), - wait=dict(required=False, default=False, type='bool'), + state=dict(required=True, choices=["present", "absent", "deleting"]), + name=dict(required=True, type="str", aliases=["service"]), + cluster=dict(required=False, type="str", default="default"), + task_definition=dict(required=False, type="str"), + load_balancers=dict(required=False, default=[], type="list", elements="dict"), + desired_count=dict(required=False, type="int"), + client_token=dict(required=False, default="", type="str", no_log=False), + role=dict(required=False, default="", type="str"), + delay=dict(required=False, type="int", default=10), + repeat=dict(required=False, type="int", default=10), + force_new_deployment=dict(required=False, default=False, type="bool"), + force_deletion=dict(required=False, default=False, type="bool"), + deployment_controller=dict(required=False, default={}, type="dict"), + deployment_configuration=dict(required=False, default={}, type="dict"), + wait=dict(required=False, default=False, type="bool"), placement_constraints=dict( required=False, default=[], - type='list', - elements='dict', - options=dict( - type=dict(type='str'), - expression=dict(required=False, type='str') - ) + type="list", + elements="dict", + options=dict(type=dict(type="str"), expression=dict(required=False, type="str")), ), - purge_placement_constraints=dict(required=False, default=False, type='bool'), + purge_placement_constraints=dict(required=False, default=False, type="bool"), placement_strategy=dict( required=False, default=[], - type='list', - elements='dict', + type="list", + elements="dict", options=dict( - type=dict(type='str'), - field=dict(type='str'), - ) + type=dict(type="str"), + field=dict(type="str"), + ), ), - purge_placement_strategy=dict(required=False, default=False, type='bool'), - health_check_grace_period_seconds=dict(required=False, type='int'), - network_configuration=dict(required=False, type='dict', options=dict( - subnets=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - assign_public_ip=dict(type='bool') - )), - launch_type=dict(required=False, choices=['EC2', 'FARGATE']), - platform_version=dict(required=False, type='str'), - service_registries=dict(required=False, type='list', default=[], elements='dict'), - scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']), + purge_placement_strategy=dict(required=False, default=False, type="bool"), + health_check_grace_period_seconds=dict(required=False, type="int"), + network_configuration=dict( + required=False, + type="dict", + options=dict( + subnets=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + assign_public_ip=dict(type="bool"), + ), + ), + launch_type=dict(required=False, choices=["EC2", "FARGATE"]), + platform_version=dict(required=False, type="str"), + service_registries=dict(required=False, type="list", default=[], elements="dict"), + scheduling_strategy=dict(required=False, choices=["DAEMON", "REPLICA"]), capacity_provider_strategy=dict( required=False, - type='list', + type="list", default=[], - elements='dict', + elements="dict", options=dict( - capacity_provider=dict(type='str'), - weight=dict(type='int'), - base=dict(type='int') - ) + capacity_provider=dict(type="str"), + weight=dict(type="int"), + base=dict(type="int"), + ), ), propagate_tags=dict(required=False, choices=["TASK_DEFINITION", "SERVICE"]), tags=dict(required=False, type="dict"), enable_execute_command=dict(required=False, type="bool"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - required_if=[('launch_type', 'FARGATE', ['network_configuration'])], - required_together=[['load_balancers', 'role']], - mutually_exclusive=[['launch_type', 'capacity_provider_strategy']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[("launch_type", "FARGATE", ["network_configuration"])], + required_together=[["load_balancers", "role"]], + mutually_exclusive=[["launch_type", "capacity_provider_strategy"]], + ) - if module.params['state'] == 'present': - if module.params['scheduling_strategy'] == 'REPLICA' and module.params['desired_count'] is None: - module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count') - if module.params['task_definition'] is None and not module.params['force_new_deployment']: - module.fail_json(msg='Either task_definition or force_new_deployment is required when status is present.') + if module.params["state"] == "present": + if module.params["scheduling_strategy"] == "REPLICA" and module.params["desired_count"] is None: + module.fail_json(msg="state is present, scheduling_strategy is REPLICA; missing desired_count") + if module.params["task_definition"] is None and not module.params["force_new_deployment"]: + module.fail_json(msg="Either task_definition or force_new_deployment is required when status is present.") - if len(module.params['capacity_provider_strategy']) > 6: - module.fail_json(msg='AWS allows a maximum of six capacity providers in the strategy.') + if len(module.params["capacity_provider_strategy"]) > 6: + module.fail_json(msg="AWS allows a maximum of six capacity providers in the strategy.") service_mgr = EcsServiceManager(module) - if module.params['network_configuration']: - network_configuration = service_mgr.format_network_configuration(module.params['network_configuration']) + if module.params["network_configuration"]: + network_configuration = service_mgr.format_network_configuration(module.params["network_configuration"]) else: network_configuration = None - deployment_controller = map_complex_type(module.params['deployment_controller'], - DEPLOYMENT_CONTROLLER_TYPE_MAP) + deployment_controller = map_complex_type(module.params["deployment_controller"], DEPLOYMENT_CONTROLLER_TYPE_MAP) deploymentController = snake_dict_to_camel_dict(deployment_controller) - deployment_configuration = map_complex_type(module.params['deployment_configuration'], - DEPLOYMENT_CONFIGURATION_TYPE_MAP) + deployment_configuration = map_complex_type( + module.params["deployment_configuration"], DEPLOYMENT_CONFIGURATION_TYPE_MAP + ) deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration) - serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries'])) - capacityProviders = list(map(snake_dict_to_camel_dict, module.params['capacity_provider_strategy'])) + serviceRegistries = list(map(snake_dict_to_camel_dict, module.params["service_registries"])) + capacityProviders = list(map(snake_dict_to_camel_dict, module.params["capacity_provider_strategy"])) try: - existing = service_mgr.describe_service(module.params['cluster'], module.params['name']) + existing = service_mgr.describe_service(module.params["cluster"], module.params["name"]) except Exception as e: - module.fail_json_aws(e, - msg="Exception describing service '{0}' in cluster '{1}'" - .format(module.params['name'], module.params['cluster'])) + module.fail_json_aws( + e, + msg="Exception describing service '{0}' in cluster '{1}'".format( + module.params["name"], module.params["cluster"] + ), + ) results = dict(changed=False) - if module.params['state'] == 'present': - + if module.params["state"] == "present": matching = False update = False - if existing and 'status' in existing and existing['status'] == "ACTIVE": - if module.params['force_new_deployment']: + if existing and "status" in existing and existing["status"] == "ACTIVE": + if module.params["force_new_deployment"]: update = True elif service_mgr.is_matching_service(module.params, existing): matching = True - results['service'] = existing + results["service"] = existing else: update = True if not matching: if not module.check_mode: - - role = module.params['role'] - clientToken = module.params['client_token'] + role = module.params["role"] + clientToken = module.params["client_token"] loadBalancers = [] - for loadBalancer in module.params['load_balancers']: - if 'containerPort' in loadBalancer: - loadBalancer['containerPort'] = int(loadBalancer['containerPort']) + for loadBalancer in module.params["load_balancers"]: + if "containerPort" in loadBalancer: + loadBalancer["containerPort"] = int(loadBalancer["containerPort"]) loadBalancers.append(loadBalancer) for loadBalancer in loadBalancers: - if 'containerPort' in loadBalancer: - loadBalancer['containerPort'] = int(loadBalancer['containerPort']) + if "containerPort" in loadBalancer: + loadBalancer["containerPort"] = int(loadBalancer["containerPort"]) if update: # check various parameters and AWS SDK versions and give a helpful error if the SDK is not new enough for feature - if module.params['scheduling_strategy']: - if (existing['schedulingStrategy']) != module.params['scheduling_strategy']: - module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service") - - if module.params['service_registries']: - if (existing['serviceRegistries'] or []) != serviceRegistries: - module.fail_json(msg="It is not possible to update the service registries of an existing service") - if module.params['capacity_provider_strategy']: - if 'launchType' in existing.keys(): - module.fail_json(msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy.") - if module.params['launch_type']: - if 'capacityProviderStrategy' in existing.keys(): - module.fail_json(msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type.") - if (existing['loadBalancers'] or []) != loadBalancers: + if module.params["scheduling_strategy"]: + if (existing["schedulingStrategy"]) != module.params["scheduling_strategy"]: + module.fail_json( + msg="It is not possible to update the scheduling strategy of an existing service" + ) + + if module.params["service_registries"]: + if (existing["serviceRegistries"] or []) != serviceRegistries: + module.fail_json( + msg="It is not possible to update the service registries of an existing service" + ) + if module.params["capacity_provider_strategy"]: + if "launchType" in existing.keys(): + module.fail_json( + msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy." + ) + if module.params["launch_type"]: + if "capacityProviderStrategy" in existing.keys(): + module.fail_json( + msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type." + ) + if (existing["loadBalancers"] or []) != loadBalancers: # fails if deployment type is not CODE_DEPLOY or ECS - if existing['deploymentController']['type'] not in ['CODE_DEPLOY', 'ECS']: - module.fail_json(msg="It is not possible to update the load balancers of an existing service") + if existing["deploymentController"]["type"] not in ["CODE_DEPLOY", "ECS"]: + module.fail_json( + msg="It is not possible to update the load balancers of an existing service" + ) - if existing.get('deploymentController', {}).get('type', None) == 'CODE_DEPLOY': - task_definition = '' + if existing.get("deploymentController", {}).get("type", None) == "CODE_DEPLOY": + task_definition = "" network_configuration = [] else: - task_definition = module.params['task_definition'] + task_definition = module.params["task_definition"] - if module.params['propagate_tags'] and module.params['propagate_tags'] != existing['propagateTags']: - module.fail_json(msg="It is not currently supported to enable propagation tags of an existing service") + if module.params["propagate_tags"] and module.params["propagate_tags"] != existing["propagateTags"]: + module.fail_json( + msg="It is not currently supported to enable propagation tags of an existing service" + ) - if module.params['tags'] and boto3_tag_list_to_ansible_dict(existing['tags']) != module.params['tags']: + if ( + module.params["tags"] + and boto3_tag_list_to_ansible_dict(existing["tags"]) != module.params["tags"] + ): module.fail_json(msg="It is not currently supported to change tags of an existing service") - updatedLoadBalancers = loadBalancers if existing['deploymentController']['type'] == 'ECS' else [] + updatedLoadBalancers = loadBalancers if existing["deploymentController"]["type"] == "ECS" else [] - if task_definition is None and module.params['force_new_deployment']: - task_definition = existing['taskDefinition'] + if task_definition is None and module.params["force_new_deployment"]: + task_definition = existing["taskDefinition"] try: # update required @@ -1180,76 +1203,73 @@ def main(): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't create service") - if response.get('tags', None): - response['tags'] = boto3_tag_list_to_ansible_dict(response['tags']) - results['service'] = response + if response.get("tags", None): + response["tags"] = boto3_tag_list_to_ansible_dict(response["tags"]) + results["service"] = response - results['changed'] = True + results["changed"] = True - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": if not existing: pass else: # it exists, so we should delete it and mark changed. # return info about the cluster deleted - del existing['deployments'] - del existing['events'] - results['ansible_facts'] = existing - if 'status' in existing and existing['status'] == "INACTIVE": - results['changed'] = False + del existing["deployments"] + del existing["events"] + results["ansible_facts"] = existing + if "status" in existing and existing["status"] == "INACTIVE": + results["changed"] = False else: if not module.check_mode: try: service_mgr.delete_service( - module.params['name'], - module.params['cluster'], - module.params['force_deletion'], + module.params["name"], + module.params["cluster"], + module.params["force_deletion"], ) # Wait for service to be INACTIVE prior to exiting - if module.params['wait']: - waiter = service_mgr.ecs.get_waiter('services_inactive') + if module.params["wait"]: + waiter = service_mgr.ecs.get_waiter("services_inactive") try: waiter.wait( - services=[module.params['name']], - cluster=module.params['cluster'], + services=[module.params["name"]], + cluster=module.params["cluster"], WaiterConfig={ - 'Delay': module.params['delay'], - 'MaxAttempts': module.params['repeat'] - } + "Delay": module.params["delay"], + "MaxAttempts": module.params["repeat"], + }, ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, 'Timeout waiting for service removal') + module.fail_json_aws(e, "Timeout waiting for service removal") except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Couldn't delete service") - results['changed'] = True + results["changed"] = True - elif module.params['state'] == 'deleting': + elif module.params["state"] == "deleting": if not existing: - module.fail_json(msg="Service '" + module.params['name'] + " not found.") + module.fail_json(msg="Service '" + module.params["name"] + " not found.") return # it exists, so we should delete it and mark changed. # return info about the cluster deleted - delay = module.params['delay'] - repeat = module.params['repeat'] + delay = module.params["delay"] + repeat = module.params["repeat"] time.sleep(delay) for i in range(repeat): - existing = service_mgr.describe_service(module.params['cluster'], module.params['name']) - status = existing['status'] + existing = service_mgr.describe_service(module.params["cluster"], module.params["name"]) + status = existing["status"] if status == "INACTIVE": - results['changed'] = True + results["changed"] = True break time.sleep(delay) if i is repeat - 1: - module.fail_json( - msg="Service still not deleted after {0} tries of {1} seconds each." - .format(repeat, delay) - ) + module.fail_json(msg="Service still not deleted after {0} tries of {1} seconds each.".format(repeat, delay)) return module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ecs_service_info.py b/plugins/modules/ecs_service_info.py index 41dd999c9c1..02a6abff207 100644 --- a/plugins/modules/ecs_service_info.py +++ b/plugins/modules/ecs_service_info.py @@ -147,14 +147,14 @@ class EcsServiceManager: def __init__(self, module): self.module = module - self.ecs = module.client('ecs') + self.ecs = module.client("ecs") @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_services_with_backoff(self, **kwargs): - paginator = self.ecs.get_paginator('list_services') + paginator = self.ecs.get_paginator("list_services") try: return paginator.paginate(**kwargs).build_full_result() - except is_boto3_error_code('ClusterNotFoundException') as e: + except is_boto3_error_code("ClusterNotFoundException") as e: self.module.fail_json_aws(e, "Could not find cluster to list services") @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) @@ -164,43 +164,43 @@ def describe_services_with_backoff(self, **kwargs): def list_services(self, cluster): fn_args = dict() if cluster and cluster is not None: - fn_args['cluster'] = cluster + fn_args["cluster"] = cluster try: response = self.list_services_with_backoff(**fn_args) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't list ECS services") - relevant_response = dict(services=response['serviceArns']) + relevant_response = dict(services=response["serviceArns"]) return relevant_response def describe_services(self, cluster, services): fn_args = dict() if cluster and cluster is not None: - fn_args['cluster'] = cluster - fn_args['services'] = services + fn_args["cluster"] = cluster + fn_args["services"] = services try: response = self.describe_services_with_backoff(**fn_args) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't describe ECS services") - running_services = [self.extract_service_from(service) for service in response.get('services', [])] - services_not_running = response.get('failures', []) + running_services = [self.extract_service_from(service) for service in response.get("services", [])] + services_not_running = response.get("failures", []) return running_services, services_not_running def extract_service_from(self, service): # some fields are datetime which is not JSON serializable # make them strings - if 'deployments' in service: - for d in service['deployments']: - if 'createdAt' in d: - d['createdAt'] = str(d['createdAt']) - if 'updatedAt' in d: - d['updatedAt'] = str(d['updatedAt']) - if 'events' in service: - if not self.module.params['events']: - del service['events'] + if "deployments" in service: + for d in service["deployments"]: + if "createdAt" in d: + d["createdAt"] = str(d["createdAt"]) + if "updatedAt" in d: + d["updatedAt"] = str(d["updatedAt"]) + if "events" in service: + if not self.module.params["events"]: + del service["events"] else: - for e in service['events']: - if 'createdAt' in e: - e['createdAt'] = str(e['createdAt']) + for e in service["events"]: + if "createdAt" in e: + e["createdAt"] = str(e["createdAt"]) return service @@ -208,38 +208,37 @@ def chunks(l, n): """Yield successive n-sized chunks from l.""" """ https://stackoverflow.com/a/312464 """ for i in range(0, len(l), n): - yield l[i:i + n] + yield l[i:i + n] # fmt: skip def main(): - argument_spec = dict( - details=dict(type='bool', default=False), - events=dict(type='bool', default=True), + details=dict(type="bool", default=False), + events=dict(type="bool", default=True), cluster=dict(), - service=dict(type='list', elements='str', aliases=['name']) + service=dict(type="list", elements="str", aliases=["name"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - show_details = module.params.get('details') + show_details = module.params.get("details") task_mgr = EcsServiceManager(module) if show_details: - if module.params['service']: - services = module.params['service'] + if module.params["service"]: + services = module.params["service"] else: - services = task_mgr.list_services(module.params['cluster'])['services'] + services = task_mgr.list_services(module.params["cluster"])["services"] ecs_info = dict(services=[], services_not_running=[]) for chunk in chunks(services, 10): - running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk) - ecs_info['services'].extend(running_services) - ecs_info['services_not_running'].extend(services_not_running) + running_services, services_not_running = task_mgr.describe_services(module.params["cluster"], chunk) + ecs_info["services"].extend(running_services) + ecs_info["services_not_running"].extend(services_not_running) else: - ecs_info = task_mgr.list_services(module.params['cluster']) + ecs_info = task_mgr.list_services(module.params["cluster"]) module.exit_json(changed=False, **ecs_info) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ecs_tag.py b/plugins/modules/ecs_tag.py index 7aac8dfb4a7..f11fc1f33ac 100644 --- a/plugins/modules/ecs_tag.py +++ b/plugins/modules/ecs_tag.py @@ -121,33 +121,32 @@ def get_tags(ecs, module, resource): try: - return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)['tags']) + return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)["tags"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource)) + module.fail_json_aws(e, msg="Failed to fetch tags for resource {0}".format(resource)) def get_arn(ecs, module, cluster_name, resource_type, resource): - try: - if resource_type == 'cluster': + if resource_type == "cluster": description = ecs.describe_clusters(clusters=[resource]) - resource_arn = description['clusters'][0]['clusterArn'] - elif resource_type == 'task': + resource_arn = description["clusters"][0]["clusterArn"] + elif resource_type == "task": description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource]) - resource_arn = description['tasks'][0]['taskArn'] - elif resource_type == 'service': + resource_arn = description["tasks"][0]["taskArn"] + elif resource_type == "service": description = ecs.describe_services(cluster=cluster_name, services=[resource]) - resource_arn = description['services'][0]['serviceArn'] - elif resource_type == 'task_definition': + resource_arn = description["services"][0]["serviceArn"] + elif resource_type == "task_definition": description = ecs.describe_task_definition(taskDefinition=resource) - resource_arn = description['taskDefinition']['taskDefinitionArn'] - elif resource_type == 'container': + resource_arn = description["taskDefinition"]["taskDefinitionArn"] + elif resource_type == "container": description = ecs.describe_container_instances(clusters=[resource]) - resource_arn = description['containerInstances'][0]['containerInstanceArn'] + resource_arn = description["containerInstances"][0]["containerInstanceArn"] except (IndexError, KeyError): - module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource)) + module.fail_json(msg="Failed to find {0} {1}".format(resource_type, resource)) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource)) + module.fail_json_aws(e, msg="Failed to find {0} {1}".format(resource_type, resource)) return resource_arn @@ -156,28 +155,28 @@ def main(): argument_spec = dict( cluster_name=dict(required=True), resource=dict(required=False), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container']) + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + resource_type=dict(default="cluster", choices=["cluster", "task", "service", "task_definition", "container"]), ) - required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])] + required_if = [("state", "present", ["tags"]), ("state", "absent", ["tags"])] module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) - resource_type = module.params['resource_type'] - cluster_name = module.params['cluster_name'] - if resource_type == 'cluster': + resource_type = module.params["resource_type"] + cluster_name = module.params["cluster_name"] + if resource_type == "cluster": resource = cluster_name else: - resource = module.params['resource'] - tags = module.params['tags'] - state = module.params['state'] - purge_tags = module.params['purge_tags'] + resource = module.params["resource"] + tags = module.params["tags"] + state = module.params["state"] + purge_tags = module.params["purge_tags"] - result = {'changed': False} + result = {"changed": False} - ecs = module.client('ecs') + ecs = module.client("ecs") resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource) @@ -186,7 +185,7 @@ def main(): add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) remove_tags = {} - if state == 'absent': + if state == "absent": for key in tags: if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): remove_tags[key] = current_tags[key] @@ -195,28 +194,28 @@ def main(): remove_tags[key] = current_tags[key] if remove_tags: - result['changed'] = True - result['removed_tags'] = remove_tags + result["changed"] = True + result["removed_tags"] = remove_tags if not module.check_mode: try: ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys())) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource)) + module.fail_json_aws(e, msg="Failed to remove tags {0} from resource {1}".format(remove_tags, resource)) - if state == 'present' and add_tags: - result['changed'] = True - result['added_tags'] = add_tags + if state == "present" and add_tags: + result["changed"] = True + result["added_tags"] = add_tags current_tags.update(add_tags) if not module.check_mode: try: - tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value') + tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name="key", tag_value_key_name="value") ecs.tag_resource(resourceArn=resource_arn, tags=tags) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource)) + module.fail_json_aws(e, msg="Failed to set tags {0} on resource {1}".format(add_tags, resource)) - result['tags'] = get_tags(ecs, module, resource_arn) + result["tags"] = get_tags(ecs, module, resource_arn) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ecs_task.py b/plugins/modules/ecs_task.py index 6c693b317bc..dfd7d9a7902 100644 --- a/plugins/modules/ecs_task.py +++ b/plugins/modules/ecs_task.py @@ -258,29 +258,29 @@ class EcsExecManager: def __init__(self, module): self.module = module - self.ecs = module.client('ecs') - self.ec2 = module.client('ec2') + self.ecs = module.client("ecs") + self.ec2 = module.client("ec2") def format_network_configuration(self, network_config): result = dict() - if 'subnets' in network_config: - result['subnets'] = network_config['subnets'] + if "subnets" in network_config: + result["subnets"] = network_config["subnets"] else: self.module.fail_json(msg="Network configuration must include subnets") - if 'security_groups' in network_config: - groups = network_config['security_groups'] - if any(not sg.startswith('sg-') for sg in groups): + if "security_groups" in network_config: + groups = network_config["security_groups"] + if any(not sg.startswith("sg-") for sg in groups): try: - vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId'] + vpc_id = self.ec2.describe_subnets(SubnetIds=[result["subnets"][0]])["Subnets"][0]["VpcId"] groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't look up security groups") - result['securityGroups'] = groups - if 'assign_public_ip' in network_config: - if network_config['assign_public_ip'] is True: - result['assignPublicIp'] = "ENABLED" + result["securityGroups"] = groups + if "assign_public_ip" in network_config: + if network_config["assign_public_ip"] is True: + result["assignPublicIp"] = "ENABLED" else: - result['assignPublicIp'] = "DISABLED" + result["assignPublicIp"] = "DISABLED" return dict(awsvpcConfiguration=result) @@ -288,10 +288,10 @@ def list_tasks(self, cluster_name, service_name, status): response = self.ecs.list_tasks( cluster=cluster_name, family=service_name, - desiredStatus=status + desiredStatus=status, ) - if len(response['taskArns']) > 0: - for c in response['taskArns']: + if len(response["taskArns"]) > 0: + for c in response["taskArns"]: if c.endswith(service_name): return c return None @@ -299,14 +299,17 @@ def list_tasks(self, cluster_name, service_name, status): def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags): if overrides is None: overrides = dict() - params = dict(cluster=cluster, taskDefinition=task_definition, - overrides=overrides, count=count, startedBy=startedBy) - if self.module.params['network_configuration']: - params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration']) + params = dict( + cluster=cluster, taskDefinition=task_definition, overrides=overrides, count=count, startedBy=startedBy + ) + if self.module.params["network_configuration"]: + params["networkConfiguration"] = self.format_network_configuration( + self.module.params["network_configuration"] + ) if launch_type: - params['launchType'] = launch_type + params["launchType"] = launch_type if tags: - params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + params["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value") # TODO: need to check if long arn format enabled. try: @@ -314,168 +317,164 @@ def run_task(self, cluster, task_definition, overrides, count, startedBy, launch except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't run task") # include tasks and failures - return response['tasks'] + return response["tasks"] def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags): args = dict() if cluster: - args['cluster'] = cluster + args["cluster"] = cluster if task_definition: - args['taskDefinition'] = task_definition + args["taskDefinition"] = task_definition if overrides: - args['overrides'] = overrides + args["overrides"] = overrides if container_instances: - args['containerInstances'] = container_instances + args["containerInstances"] = container_instances if startedBy: - args['startedBy'] = startedBy - if self.module.params['network_configuration']: - args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration']) + args["startedBy"] = startedBy + if self.module.params["network_configuration"]: + args["networkConfiguration"] = self.format_network_configuration( + self.module.params["network_configuration"] + ) if tags: - args['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + args["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value") try: response = self.ecs.start_task(**args) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't start task") # include tasks and failures - return response['tasks'] + return response["tasks"] def stop_task(self, cluster, task): response = self.ecs.stop_task(cluster=cluster, task=task) - return response['task'] + return response["task"] def ecs_task_long_format_enabled(self): - account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True) - return account_support['settings'][0]['value'] == 'enabled' + account_support = self.ecs.list_account_settings(name="taskLongArnFormat", effectiveSettings=True) + return account_support["settings"][0]["value"] == "enabled" def main(): argument_spec = dict( - operation=dict(required=True, choices=['run', 'start', 'stop']), - cluster=dict(required=False, type='str', default='default'), # R S P - task_definition=dict(required=False, type='str'), # R* S* - overrides=dict(required=False, type='dict'), # R S - count=dict(required=False, type='int'), # R - task=dict(required=False, type='str'), # P* - container_instances=dict(required=False, type='list', elements='str'), # S* - started_by=dict(required=False, type='str'), # R S - network_configuration=dict(required=False, type='dict'), - launch_type=dict(required=False, choices=['EC2', 'FARGATE']), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - wait=dict(required=False, default=False, type='bool'), + operation=dict(required=True, choices=["run", "start", "stop"]), + cluster=dict(required=False, type="str", default="default"), # R S P + task_definition=dict(required=False, type="str"), # R* S* + overrides=dict(required=False, type="dict"), # R S + count=dict(required=False, type="int"), # R + task=dict(required=False, type="str"), # P* + container_instances=dict(required=False, type="list", elements="str"), # S* + started_by=dict(required=False, type="str"), # R S + network_configuration=dict(required=False, type="dict"), + launch_type=dict(required=False, choices=["EC2", "FARGATE"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + wait=dict(required=False, default=False, type="bool"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, - required_if=[ - ('launch_type', 'FARGATE', ['network_configuration']), - ('operation', 'run', ['task_definition']), - ('operation', 'start', [ - 'task_definition', - 'container_instances' - ]), - ('operation', 'stop', ['task_definition', 'task']), - ]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ("launch_type", "FARGATE", ["network_configuration"]), + ("operation", "run", ["task_definition"]), + ("operation", "start", ["task_definition", "container_instances"]), + ("operation", "stop", ["task_definition", "task"]), + ], + ) # Validate Inputs - if module.params['operation'] == 'run': - task_to_list = module.params['task_definition'] + if module.params["operation"] == "run": + task_to_list = module.params["task_definition"] status_type = "RUNNING" - if module.params['operation'] == 'start': - task_to_list = module.params['task'] + if module.params["operation"] == "start": + task_to_list = module.params["task"] status_type = "RUNNING" - if module.params['operation'] == 'stop': - task_to_list = module.params['task_definition'] + if module.params["operation"] == "stop": + task_to_list = module.params["task_definition"] status_type = "STOPPED" service_mgr = EcsExecManager(module) - if module.params['tags']: + if module.params["tags"]: if not service_mgr.ecs_task_long_format_enabled(): module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags") - existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type) + existing = service_mgr.list_tasks(module.params["cluster"], task_to_list, status_type) results = dict(changed=False) - if module.params['operation'] == 'run': + if module.params["operation"] == "run": if existing: # TBD - validate the rest of the details - results['task'] = existing + results["task"] = existing else: if not module.check_mode: - # run_task returns a list of tasks created tasks = service_mgr.run_task( - module.params['cluster'], - module.params['task_definition'], - module.params['overrides'], - module.params['count'], - module.params['started_by'], - module.params['launch_type'], - module.params['tags'], + module.params["cluster"], + module.params["task_definition"], + module.params["overrides"], + module.params["count"], + module.params["started_by"], + module.params["launch_type"], + module.params["tags"], ) # Wait for task(s) to be running prior to exiting - if module.params['wait']: - - waiter = service_mgr.ecs.get_waiter('tasks_running') + if module.params["wait"]: + waiter = service_mgr.ecs.get_waiter("tasks_running") try: waiter.wait( - tasks=[task['taskArn'] for task in tasks], - cluster=module.params['cluster'], + tasks=[task["taskArn"] for task in tasks], + cluster=module.params["cluster"], ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, 'Timeout waiting for tasks to run') + module.fail_json_aws(e, "Timeout waiting for tasks to run") - results['task'] = tasks + results["task"] = tasks - results['changed'] = True + results["changed"] = True - elif module.params['operation'] == 'start': + elif module.params["operation"] == "start": if existing: # TBD - validate the rest of the details - results['task'] = existing + results["task"] = existing else: if not module.check_mode: - results['task'] = service_mgr.start_task( - module.params['cluster'], - module.params['task_definition'], - module.params['overrides'], - module.params['container_instances'], - module.params['started_by'], - module.params['tags'], + results["task"] = service_mgr.start_task( + module.params["cluster"], + module.params["task_definition"], + module.params["overrides"], + module.params["container_instances"], + module.params["started_by"], + module.params["tags"], ) - results['changed'] = True + results["changed"] = True - elif module.params['operation'] == 'stop': + elif module.params["operation"] == "stop": if existing: - results['task'] = existing + results["task"] = existing else: if not module.check_mode: # it exists, so we should delete it and mark changed. # return info about the cluster deleted - results['task'] = service_mgr.stop_task( - module.params['cluster'], - module.params['task'] - ) + results["task"] = service_mgr.stop_task(module.params["cluster"], module.params["task"]) # Wait for task to be stopped prior to exiting - if module.params['wait']: - - waiter = service_mgr.ecs.get_waiter('tasks_stopped') + if module.params["wait"]: + waiter = service_mgr.ecs.get_waiter("tasks_stopped") try: waiter.wait( - tasks=[module.params['task']], - cluster=module.params['cluster'], + tasks=[module.params["task"]], + cluster=module.params["cluster"], ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, 'Timeout waiting for task to stop') + module.fail_json_aws(e, "Timeout waiting for task to stop") - results['changed'] = True + results["changed"] = True module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py index 16fcab712c4..0a8e413dbcd 100644 --- a/plugins/modules/ecs_taskdefinition.py +++ b/plugins/modules/ecs_taskdefinition.py @@ -792,49 +792,62 @@ class EcsTaskManager: def __init__(self, module): self.module = module - self.ecs = module.client('ecs', AWSRetry.jittered_backoff()) + self.ecs = module.client("ecs", AWSRetry.jittered_backoff()) def describe_task(self, task_name): try: response = self.ecs.describe_task_definition(aws_retry=True, taskDefinition=task_name) - return response['taskDefinition'] + return response["taskDefinition"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: return None - def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, - volumes, launch_type, cpu, memory, placement_constraints): + def register_task( + self, + family, + task_role_arn, + execution_role_arn, + network_mode, + container_definitions, + volumes, + launch_type, + cpu, + memory, + placement_constraints, + ): validated_containers = [] # Ensures the number parameters are int as required by the AWS SDK for container in container_definitions: - for param in ('memory', 'cpu', 'memoryReservation', 'startTimeout', 'stopTimeout'): + for param in ("memory", "cpu", "memoryReservation", "startTimeout", "stopTimeout"): if param in container: container[param] = int(container[param]) - if 'portMappings' in container: - for port_mapping in container['portMappings']: - for port in ('hostPort', 'containerPort'): + if "portMappings" in container: + for port_mapping in container["portMappings"]: + for port in ("hostPort", "containerPort"): if port in port_mapping: port_mapping[port] = int(port_mapping[port]) - if network_mode == 'awsvpc' and 'hostPort' in port_mapping: - if port_mapping['hostPort'] != port_mapping.get('containerPort'): - self.module.fail_json(msg="In awsvpc network mode, host port must be set to the same as " - "container port or not be set") - - if 'linuxParameters' in container: - for linux_param in container.get('linuxParameters'): - if linux_param == 'tmpfs': - for tmpfs_param in container['linuxParameters']['tmpfs']: - if 'size' in tmpfs_param: - tmpfs_param['size'] = int(tmpfs_param['size']) - - for param in ('maxSwap', 'swappiness', 'sharedMemorySize'): + if network_mode == "awsvpc" and "hostPort" in port_mapping: + if port_mapping["hostPort"] != port_mapping.get("containerPort"): + self.module.fail_json( + msg="In awsvpc network mode, host port must be set to the same as " + "container port or not be set" + ) + + if "linuxParameters" in container: + for linux_param in container.get("linuxParameters"): + if linux_param == "tmpfs": + for tmpfs_param in container["linuxParameters"]["tmpfs"]: + if "size" in tmpfs_param: + tmpfs_param["size"] = int(tmpfs_param["size"]) + + for param in ("maxSwap", "swappiness", "sharedMemorySize"): if param in linux_param: - container['linuxParameters'][param] = int(container['linuxParameters'][param]) + container["linuxParameters"][param] = int(container["linuxParameters"][param]) - if 'ulimits' in container: - for limits_mapping in container['ulimits']: - for limit in ('softLimit', 'hardLimit'): + if "ulimits" in container: + for limits_mapping in container["ulimits"]: + for limit in ("softLimit", "hardLimit"): if limit in limits_mapping: limits_mapping[limit] = int(limits_mapping[limit]) @@ -844,47 +857,42 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, family=family, taskRoleArn=task_role_arn, containerDefinitions=container_definitions, - volumes=volumes + volumes=volumes, ) - if network_mode != 'default': - params['networkMode'] = network_mode + if network_mode != "default": + params["networkMode"] = network_mode if cpu: - params['cpu'] = cpu + params["cpu"] = cpu if memory: - params['memory'] = memory + params["memory"] = memory if launch_type: - params['requiresCompatibilities'] = [launch_type] + params["requiresCompatibilities"] = [launch_type] if execution_role_arn: - params['executionRoleArn'] = execution_role_arn + params["executionRoleArn"] = execution_role_arn if placement_constraints: - params['placementConstraints'] = placement_constraints + params["placementConstraints"] = placement_constraints try: response = self.ecs.register_task_definition(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Failed to register task") - return response['taskDefinition'] + return response["taskDefinition"] def describe_task_definitions(self, family): - data = { - "taskDefinitionArns": [], - "nextToken": None - } + data = {"taskDefinitionArns": [], "nextToken": None} def fetch(): # Boto3 is weird about params passed, so only pass nextToken if we have a value - params = { - 'familyPrefix': family - } + params = {"familyPrefix": family} - if data['nextToken']: - params['nextToken'] = data['nextToken'] + if data["nextToken"]: + params["nextToken"] = data["nextToken"] result = self.ecs.list_task_definitions(**params) - data['taskDefinitionArns'] += result['taskDefinitionArns'] - data['nextToken'] = result.get('nextToken', None) - return data['nextToken'] is not None + data["taskDefinitionArns"] += result["taskDefinitionArns"] + data["nextToken"] = result.get("nextToken", None) + return data["nextToken"] is not None # Fetch all the arns, possibly across multiple pages while fetch(): @@ -893,118 +901,134 @@ def fetch(): # Return the full descriptions of the task definitions, sorted ascending by revision return list( sorted( - [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']], - key=lambda td: td['revision'] + [ + self.ecs.describe_task_definition(taskDefinition=arn)["taskDefinition"] + for arn in data["taskDefinitionArns"] + ], + key=lambda td: td["revision"], ) ) def deregister_task(self, taskArn): response = self.ecs.deregister_task_definition(taskDefinition=taskArn) - return response['taskDefinition'] + return response["taskDefinition"] def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), - arn=dict(required=False, type='str'), - family=dict(required=False, type='str'), - revision=dict(required=False, type='int'), - force_create=dict(required=False, default=False, type='bool'), - containers=dict(required=True, type='list', elements='dict'), - network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'), - task_role_arn=dict(required=False, default='', type='str'), - execution_role_arn=dict(required=False, default='', type='str'), - volumes=dict(required=False, type='list', elements='dict'), - launch_type=dict(required=False, choices=['EC2', 'FARGATE']), + state=dict(required=True, choices=["present", "absent"]), + arn=dict(required=False, type="str"), + family=dict(required=False, type="str"), + revision=dict(required=False, type="int"), + force_create=dict(required=False, default=False, type="bool"), + containers=dict(required=True, type="list", elements="dict"), + network_mode=dict( + required=False, default="bridge", choices=["default", "bridge", "host", "none", "awsvpc"], type="str" + ), + task_role_arn=dict(required=False, default="", type="str"), + execution_role_arn=dict(required=False, default="", type="str"), + volumes=dict(required=False, type="list", elements="dict"), + launch_type=dict(required=False, choices=["EC2", "FARGATE"]), cpu=dict(), - memory=dict(required=False, type='str'), - placement_constraints=dict(required=False, type='list', elements='dict', - options=dict(type=dict(type='str'), expression=dict(type='str'))), + memory=dict(required=False, type="str"), + placement_constraints=dict( + required=False, + type="list", + elements="dict", + options=dict(type=dict(type="str"), expression=dict(type="str")), + ), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - required_if=[('launch_type', 'FARGATE', ['cpu', 'memory'])] - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[("launch_type", "FARGATE", ["cpu", "memory"])], + ) task_to_describe = None task_mgr = EcsTaskManager(module) results = dict(changed=False) - if module.params['state'] == 'present': - if 'containers' not in module.params or not module.params['containers']: + if module.params["state"] == "present": + if "containers" not in module.params or not module.params["containers"]: module.fail_json(msg="To use task definitions, a list of containers must be specified") - if 'family' not in module.params or not module.params['family']: + if "family" not in module.params or not module.params["family"]: module.fail_json(msg="To use task definitions, a family must be specified") - network_mode = module.params['network_mode'] - launch_type = module.params['launch_type'] - placement_constraints = module.params['placement_constraints'] - if launch_type == 'FARGATE': - if network_mode != 'awsvpc': + network_mode = module.params["network_mode"] + launch_type = module.params["launch_type"] + placement_constraints = module.params["placement_constraints"] + if launch_type == "FARGATE": + if network_mode != "awsvpc": module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc") if placement_constraints: module.fail_json(msg="Task placement constraints are not supported for tasks run on Fargate") - for container in module.params['containers']: - if container.get('links') and network_mode == 'awsvpc': - module.fail_json(msg='links parameter is not supported if network mode is awsvpc.') + for container in module.params["containers"]: + if container.get("links") and network_mode == "awsvpc": + module.fail_json(msg="links parameter is not supported if network mode is awsvpc.") - for environment in container.get('environment', []): - environment['value'] = environment['value'] + for environment in container.get("environment", []): + environment["value"] = environment["value"] - for environment_file in container.get('environmentFiles', []): - if environment_file['type'] != 's3': - module.fail_json(msg='The only supported value for environmentFiles is s3.') + for environment_file in container.get("environmentFiles", []): + if environment_file["type"] != "s3": + module.fail_json(msg="The only supported value for environmentFiles is s3.") - for linux_param in container.get('linuxParameters', {}): - if linux_param == 'maxSwap' and launch_type == 'FARGATE': - module.fail_json(msg='devices parameter is not supported with the FARGATE launch type.') + for linux_param in container.get("linuxParameters", {}): + if linux_param == "maxSwap" and launch_type == "FARGATE": + module.fail_json(msg="devices parameter is not supported with the FARGATE launch type.") - if linux_param == 'maxSwap' and launch_type == 'FARGATE': - module.fail_json(msg='maxSwap parameter is not supported with the FARGATE launch type.') - elif linux_param == 'maxSwap' and int(container['linuxParameters']['maxSwap']) < 0: - module.fail_json(msg='Accepted values for maxSwap are 0 or any positive integer.') + if linux_param == "maxSwap" and launch_type == "FARGATE": + module.fail_json(msg="maxSwap parameter is not supported with the FARGATE launch type.") + elif linux_param == "maxSwap" and int(container["linuxParameters"]["maxSwap"]) < 0: + module.fail_json(msg="Accepted values for maxSwap are 0 or any positive integer.") - if ( - linux_param == 'swappiness' and - (int(container['linuxParameters']['swappiness']) < 0 or int(container['linuxParameters']['swappiness']) > 100) + if linux_param == "swappiness" and ( + int(container["linuxParameters"]["swappiness"]) < 0 + or int(container["linuxParameters"]["swappiness"]) > 100 ): - module.fail_json(msg='Accepted values for swappiness are whole numbers between 0 and 100.') + module.fail_json(msg="Accepted values for swappiness are whole numbers between 0 and 100.") - if linux_param == 'sharedMemorySize' and launch_type == 'FARGATE': - module.fail_json(msg='sharedMemorySize parameter is not supported with the FARGATE launch type.') + if linux_param == "sharedMemorySize" and launch_type == "FARGATE": + module.fail_json(msg="sharedMemorySize parameter is not supported with the FARGATE launch type.") - if linux_param == 'tmpfs' and launch_type == 'FARGATE': - module.fail_json(msg='tmpfs parameter is not supported with the FARGATE launch type.') + if linux_param == "tmpfs" and launch_type == "FARGATE": + module.fail_json(msg="tmpfs parameter is not supported with the FARGATE launch type.") - if container.get('hostname') and network_mode == 'awsvpc': - module.fail_json(msg='hostname parameter is not supported when the awsvpc network mode is used.') + if container.get("hostname") and network_mode == "awsvpc": + module.fail_json(msg="hostname parameter is not supported when the awsvpc network mode is used.") - if container.get('extraHosts') and network_mode == 'awsvpc': - module.fail_json(msg='extraHosts parameter is not supported when the awsvpc network mode is used.') + if container.get("extraHosts") and network_mode == "awsvpc": + module.fail_json(msg="extraHosts parameter is not supported when the awsvpc network mode is used.") - family = module.params['family'] - existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family']) + family = module.params["family"] + existing_definitions_in_family = task_mgr.describe_task_definitions(module.params["family"]) - if 'revision' in module.params and module.params['revision']: + if "revision" in module.params and module.params["revision"]: # The definition specifies revision. We must guarantee that an active revision of that number will result from this. - revision = int(module.params['revision']) + revision = int(module.params["revision"]) # A revision has been explicitly specified. Attempt to locate a matching revision - tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision] + tasks_defs_for_revision = [td for td in existing_definitions_in_family if td["revision"] == revision] existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None - if existing and existing['status'] != "ACTIVE": + if existing and existing["status"] != "ACTIVE": # We cannot reactivate an inactive revision - module.fail_json(msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision)) + module.fail_json( + msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision) + ) elif not existing: if not existing_definitions_in_family and revision != 1: - module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision) - elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision: - module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" % - (revision, existing_definitions_in_family[-1]['revision'] + 1)) + module.fail_json( + msg="You have specified a revision of %d but a created revision would be 1" % revision + ) + elif existing_definitions_in_family and existing_definitions_in_family[-1]["revision"] + 1 != revision: + module.fail_json( + msg="You have specified a revision of %d but a created revision would be %d" + % (revision, existing_definitions_in_family[-1]["revision"] + 1) + ) else: existing = None @@ -1024,9 +1048,9 @@ def _right_has_values_of_left(left, right): if list_val not in right_list: # if list_val is the port mapping, the key 'protocol' may be absent (but defaults to 'tcp') # fill in that default if absent and see if it is in right_list then - if isinstance(list_val, dict) and not list_val.get('protocol'): + if isinstance(list_val, dict) and not list_val.get("protocol"): modified_list_val = dict(list_val) - modified_list_val.update(protocol='tcp') + modified_list_val.update(protocol="tcp") if modified_list_val in right_list: continue else: @@ -1036,24 +1060,32 @@ def _right_has_values_of_left(left, right): for k, v in right.items(): if v and k not in left: # 'essential' defaults to True when not specified - if k == 'essential' and v is True: + if k == "essential" and v is True: pass else: return False return True - def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, existing_task_definition): - if td['status'] != "ACTIVE": + def _task_definition_matches( + requested_volumes, + requested_containers, + requested_task_role_arn, + requested_launch_type, + existing_task_definition, + ): + if td["status"] != "ACTIVE": return None - if requested_task_role_arn != td.get('taskRoleArn', ""): + if requested_task_role_arn != td.get("taskRoleArn", ""): return None - if requested_launch_type is not None and requested_launch_type not in td.get('requiresCompatibilities', []): + if requested_launch_type is not None and requested_launch_type not in td.get( + "requiresCompatibilities", [] + ): return None - existing_volumes = td.get('volumes', []) or [] + existing_volumes = td.get("volumes", []) or [] if len(requested_volumes) != len(existing_volumes): # Nope. @@ -1071,7 +1103,7 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ if not found: return None - existing_containers = td.get('containerDefinitions', []) or [] + existing_containers = td.get("containerDefinitions", []) or [] if len(requested_containers) != len(existing_containers): # Nope. @@ -1092,42 +1124,50 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested for td in existing_definitions_in_family: - requested_volumes = module.params['volumes'] or [] - requested_containers = module.params['containers'] or [] - requested_task_role_arn = module.params['task_role_arn'] - requested_launch_type = module.params['launch_type'] - existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td) + requested_volumes = module.params["volumes"] or [] + requested_containers = module.params["containers"] or [] + requested_task_role_arn = module.params["task_role_arn"] + requested_launch_type = module.params["launch_type"] + existing = _task_definition_matches( + requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td + ) if existing: break - if existing and not module.params.get('force_create'): + if existing and not module.params.get("force_create"): # Awesome. Have an existing one. Nothing to do. - results['taskdefinition'] = existing + results["taskdefinition"] = existing else: if not module.check_mode: # Doesn't exist. create it. - volumes = module.params.get('volumes', []) or [] - results['taskdefinition'] = task_mgr.register_task(module.params['family'], - module.params['task_role_arn'], - module.params['execution_role_arn'], - module.params['network_mode'], - module.params['containers'], - volumes, - module.params['launch_type'], - module.params['cpu'], - module.params['memory'], - module.params['placement_constraints'],) - results['changed'] = True - - elif module.params['state'] == 'absent': + volumes = module.params.get("volumes", []) or [] + results["taskdefinition"] = task_mgr.register_task( + module.params["family"], + module.params["task_role_arn"], + module.params["execution_role_arn"], + module.params["network_mode"], + module.params["containers"], + volumes, + module.params["launch_type"], + module.params["cpu"], + module.params["memory"], + module.params["placement_constraints"], + ) + results["changed"] = True + + elif module.params["state"] == "absent": # When de-registering a task definition, we can specify the ARN OR the family and revision. - if module.params['state'] == 'absent': - if 'arn' in module.params and module.params['arn'] is not None: - task_to_describe = module.params['arn'] - elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \ - module.params['revision'] is not None: - task_to_describe = module.params['family'] + ":" + str(module.params['revision']) + if module.params["state"] == "absent": + if "arn" in module.params and module.params["arn"] is not None: + task_to_describe = module.params["arn"] + elif ( + "family" in module.params + and module.params["family"] is not None + and "revision" in module.params + and module.params["revision"] is not None + ): + task_to_describe = module.params["family"] + ":" + str(module.params["revision"]) else: module.fail_json(msg="To use task definitions, an arn or family and revision must be specified") @@ -1137,16 +1177,16 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ pass else: # It exists, so we should delete it and mark changed. Return info about the task definition deleted - results['taskdefinition'] = existing - if 'status' in existing and existing['status'] == "INACTIVE": - results['changed'] = False + results["taskdefinition"] = existing + if "status" in existing and existing["status"] == "INACTIVE": + results["changed"] = False else: if not module.check_mode: task_mgr.deregister_task(task_to_describe) - results['changed'] = True + results["changed"] = True module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ecs_taskdefinition_info.py b/plugins/modules/ecs_taskdefinition_info.py index b619cd4c4be..5e235096d96 100644 --- a/plugins/modules/ecs_taskdefinition_info.py +++ b/plugins/modules/ecs_taskdefinition_info.py @@ -359,20 +359,20 @@ def main(): argument_spec = dict( - task_definition=dict(required=True, type='str') + task_definition=dict(required=True, type="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - ecs = module.client('ecs') + ecs = module.client("ecs") try: - ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition'] + ecs_td = ecs.describe_task_definition(taskDefinition=module.params["task_definition"])["taskDefinition"] except botocore.exceptions.ClientError: ecs_td = {} module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/efs.py b/plugins/modules/efs.py index c3d1bc6830a..c1d9f247b34 100644 --- a/plugins/modules/efs.py +++ b/plugins/modules/efs.py @@ -267,35 +267,34 @@ def _index_by_key(key, items): class EFSConnection(object): - DEFAULT_WAIT_TIMEOUT_SECONDS = 0 - STATE_CREATING = 'creating' - STATE_AVAILABLE = 'available' - STATE_DELETING = 'deleting' - STATE_DELETED = 'deleted' + STATE_CREATING = "creating" + STATE_AVAILABLE = "available" + STATE_DELETING = "deleting" + STATE_DELETED = "deleted" def __init__(self, module): - self.connection = module.client('efs') + self.connection = module.client("efs") region = module.region self.module = module self.region = region - self.wait = module.params.get('wait') - self.wait_timeout = module.params.get('wait_timeout') + self.wait = module.params.get("wait") + self.wait_timeout = module.params.get("wait_timeout") def get_file_systems(self, **kwargs): """ - Returns generator of file systems including all attributes of FS + Returns generator of file systems including all attributes of FS """ items = iterate_all( - 'FileSystems', + "FileSystems", self.connection.describe_file_systems, - **kwargs + **kwargs, ) for item in items: - item['Name'] = item['CreationToken'] - item['CreationTime'] = str(item['CreationTime']) + item["Name"] = item["CreationToken"] + item["CreationTime"] = str(item["CreationTime"]) """ In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose @@ -303,90 +302,92 @@ def get_file_systems(self, **kwargs): AWS documentation is available here: https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html """ - item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) - item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) - if 'Timestamp' in item['SizeInBytes']: - item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp']) - if item['LifeCycleState'] == self.STATE_AVAILABLE: - item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId']) - item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId'])) + item["MountPoint"] = ".%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) + item["FilesystemAddress"] = "%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) + if "Timestamp" in item["SizeInBytes"]: + item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) + if item["LifeCycleState"] == self.STATE_AVAILABLE: + item["Tags"] = self.get_tags(FileSystemId=item["FileSystemId"]) + item["MountTargets"] = list(self.get_mount_targets(FileSystemId=item["FileSystemId"])) else: - item['Tags'] = {} - item['MountTargets'] = [] + item["Tags"] = {} + item["MountTargets"] = [] yield item def get_tags(self, **kwargs): """ - Returns tag list for selected instance of EFS + Returns tag list for selected instance of EFS """ - tags = self.connection.describe_tags(**kwargs)['Tags'] + tags = self.connection.describe_tags(**kwargs)["Tags"] return tags def get_mount_targets(self, **kwargs): """ - Returns mount targets for selected instance of EFS + Returns mount targets for selected instance of EFS """ targets = iterate_all( - 'MountTargets', + "MountTargets", self.connection.describe_mount_targets, - **kwargs + **kwargs, ) for target in targets: - if target['LifeCycleState'] == self.STATE_AVAILABLE: - target['SecurityGroups'] = list(self.get_security_groups( - MountTargetId=target['MountTargetId'] - )) + if target["LifeCycleState"] == self.STATE_AVAILABLE: + target["SecurityGroups"] = list(self.get_security_groups(MountTargetId=target["MountTargetId"])) else: - target['SecurityGroups'] = [] + target["SecurityGroups"] = [] yield target def get_security_groups(self, **kwargs): """ - Returns security groups for selected instance of EFS + Returns security groups for selected instance of EFS """ return iterate_all( - 'SecurityGroups', + "SecurityGroups", self.connection.describe_mount_target_security_groups, - **kwargs + **kwargs, ) def get_file_system_id(self, name): """ - Returns ID of instance by instance name + Returns ID of instance by instance name """ - info = first_or_default(iterate_all( - 'FileSystems', - self.connection.describe_file_systems, - CreationToken=name - )) - return info and info['FileSystemId'] or None + info = first_or_default( + iterate_all( + "FileSystems", + self.connection.describe_file_systems, + CreationToken=name, + ) + ) + return info and info["FileSystemId"] or None def get_file_system_state(self, name, file_system_id=None): """ - Returns state of filesystem by EFS id/name + Returns state of filesystem by EFS id/name """ - info = first_or_default(iterate_all( - 'FileSystems', - self.connection.describe_file_systems, - CreationToken=name, - FileSystemId=file_system_id - )) - return info and info['LifeCycleState'] or self.STATE_DELETED + info = first_or_default( + iterate_all( + "FileSystems", + self.connection.describe_file_systems, + CreationToken=name, + FileSystemId=file_system_id, + ) + ) + return info and info["LifeCycleState"] or self.STATE_DELETED def get_mount_targets_in_state(self, file_system_id, states=None): """ - Returns states of mount targets of selected EFS with selected state(s) (optional) + Returns states of mount targets of selected EFS with selected state(s) (optional) """ targets = iterate_all( - 'MountTargets', + "MountTargets", self.connection.describe_mount_targets, - FileSystemId=file_system_id + FileSystemId=file_system_id, ) if states: if not isinstance(states, list): states = [states] - targets = filter(lambda target: target['LifeCycleState'] in states, targets) + targets = filter(lambda target: target["LifeCycleState"] in states, targets) return list(targets) @@ -394,47 +395,53 @@ def get_throughput_mode(self, **kwargs): """ Returns throughput mode for selected EFS instance """ - info = first_or_default(iterate_all( - 'FileSystems', - self.connection.describe_file_systems, - **kwargs - )) + info = first_or_default( + iterate_all( + "FileSystems", + self.connection.describe_file_systems, + **kwargs, + ) + ) - return info and info['ThroughputMode'] or None + return info and info["ThroughputMode"] or None def get_provisioned_throughput_in_mibps(self, **kwargs): """ Returns throughput mode for selected EFS instance """ - info = first_or_default(iterate_all( - 'FileSystems', - self.connection.describe_file_systems, - **kwargs - )) - return info.get('ProvisionedThroughputInMibps', None) + info = first_or_default( + iterate_all( + "FileSystems", + self.connection.describe_file_systems, + **kwargs, + ) + ) + return info.get("ProvisionedThroughputInMibps", None) - def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps): + def create_file_system( + self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps + ): """ - Creates new filesystem with selected name + Creates new filesystem with selected name """ changed = False state = self.get_file_system_state(name) params = {} - params['CreationToken'] = name - params['PerformanceMode'] = performance_mode + params["CreationToken"] = name + params["PerformanceMode"] = performance_mode if encrypt: - params['Encrypted'] = encrypt + params["Encrypted"] = encrypt if kms_key_id is not None: - params['KmsKeyId'] = kms_key_id + params["KmsKeyId"] = kms_key_id if throughput_mode: - params['ThroughputMode'] = throughput_mode + params["ThroughputMode"] = throughput_mode if provisioned_throughput_in_mibps: - params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps + params["ProvisionedThroughputInMibps"] = provisioned_throughput_in_mibps if state in [self.STATE_DELETING, self.STATE_DELETED]: wait_for( lambda: self.get_file_system_state(name), - self.STATE_DELETED + self.STATE_DELETED, ) try: self.connection.create_file_system(**params) @@ -448,7 +455,7 @@ def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throug wait_for( lambda: self.get_file_system_state(name), self.STATE_AVAILABLE, - self.wait_timeout + self.wait_timeout, ) return changed @@ -465,14 +472,14 @@ def update_file_system(self, name, throughput_mode, provisioned_throughput_in_mi current_throughput = self.get_provisioned_throughput_in_mibps(FileSystemId=fs_id) params = dict() if throughput_mode and throughput_mode != current_mode: - params['ThroughputMode'] = throughput_mode + params["ThroughputMode"] = throughput_mode if provisioned_throughput_in_mibps and provisioned_throughput_in_mibps != current_throughput: - params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps + params["ProvisionedThroughputInMibps"] = provisioned_throughput_in_mibps if len(params) > 0: wait_for( lambda: self.get_file_system_state(name), self.STATE_AVAILABLE, - self.wait_timeout + self.wait_timeout, ) try: self.connection.update_file_system(FileSystemId=fs_id, **params) @@ -490,11 +497,11 @@ def update_lifecycle_policy(self, name, transition_to_ia): if state in [self.STATE_AVAILABLE, self.STATE_CREATING]: fs_id = self.get_file_system_id(name) current_policies = self.connection.describe_lifecycle_configuration(FileSystemId=fs_id) - if transition_to_ia == 'None': + if transition_to_ia == "None": LifecyclePolicies = [] else: - LifecyclePolicies = [{'TransitionToIA': 'AFTER_' + transition_to_ia + '_DAYS'}] - if current_policies.get('LifecyclePolicies') != LifecyclePolicies: + LifecyclePolicies = [{"TransitionToIA": "AFTER_" + transition_to_ia + "_DAYS"}] + if current_policies.get("LifecyclePolicies") != LifecyclePolicies: response = self.connection.put_lifecycle_configuration( FileSystemId=fs_id, LifecyclePolicies=LifecyclePolicies, @@ -504,20 +511,19 @@ def update_lifecycle_policy(self, name, transition_to_ia): def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, provisioned_throughput_in_mibps): """ - Change attributes (mount targets and tags) of filesystem by name + Change attributes (mount targets and tags) of filesystem by name """ result = False fs_id = self.get_file_system_id(name) if tags is not None: - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags) + tags_need_modify, tags_to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags + ) if tags_to_delete: try: - self.connection.delete_tags( - FileSystemId=fs_id, - TagKeys=tags_to_delete - ) + self.connection.delete_tags(FileSystemId=fs_id, TagKeys=tags_to_delete) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Unable to delete tags.") @@ -526,8 +532,7 @@ def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, if tags_need_modify: try: self.connection.create_tags( - FileSystemId=fs_id, - Tags=ansible_dict_to_boto3_tag_list(tags_need_modify) + FileSystemId=fs_id, Tags=ansible_dict_to_boto3_tag_list(tags_need_modify) ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Unable to create tags.") @@ -538,54 +543,56 @@ def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, incomplete_states = [self.STATE_CREATING, self.STATE_DELETING] wait_for( lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), - 0 + 0, ) - current_targets = _index_by_key('SubnetId', self.get_mount_targets(FileSystemId=fs_id)) - targets = _index_by_key('SubnetId', targets) + current_targets = _index_by_key("SubnetId", self.get_mount_targets(FileSystemId=fs_id)) + targets = _index_by_key("SubnetId", targets) - targets_to_create, intersection, targets_to_delete = dict_diff(current_targets, - targets, True) + targets_to_create, intersection, targets_to_delete = dict_diff(current_targets, targets, True) # To modify mount target it should be deleted and created again - changed = [sid for sid in intersection if not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'], - current_targets[sid], targets[sid])] + changed = [ + sid + for sid in intersection + if not targets_equal( + ["SubnetId", "IpAddress", "NetworkInterfaceId"], current_targets[sid], targets[sid] + ) + ] targets_to_delete = list(targets_to_delete) + changed targets_to_create = list(targets_to_create) + changed if targets_to_delete: for sid in targets_to_delete: - self.connection.delete_mount_target( - MountTargetId=current_targets[sid]['MountTargetId'] - ) + self.connection.delete_mount_target(MountTargetId=current_targets[sid]["MountTargetId"]) wait_for( lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), - 0 + 0, ) result = True if targets_to_create: for sid in targets_to_create: - self.connection.create_mount_target( - FileSystemId=fs_id, - **targets[sid] - ) + self.connection.create_mount_target(FileSystemId=fs_id, **targets[sid]) wait_for( lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), 0, - self.wait_timeout + self.wait_timeout, ) result = True # If no security groups were passed into the module, then do not change it. - security_groups_to_update = [sid for sid in intersection if - 'SecurityGroups' in targets[sid] and - current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups']] + security_groups_to_update = [ + sid + for sid in intersection + if "SecurityGroups" in targets[sid] + and current_targets[sid]["SecurityGroups"] != targets[sid]["SecurityGroups"] + ] if security_groups_to_update: for sid in security_groups_to_update: self.connection.modify_mount_target_security_groups( - MountTargetId=current_targets[sid]['MountTargetId'], - SecurityGroups=targets[sid].get('SecurityGroups', None) + MountTargetId=current_targets[sid]["MountTargetId"], + SecurityGroups=targets[sid].get("SecurityGroups", None), ) result = True @@ -593,14 +600,14 @@ def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, def delete_file_system(self, name, file_system_id=None): """ - Removes EFS instance by id/name + Removes EFS instance by id/name """ result = False state = self.get_file_system_state(name, file_system_id) if state in [self.STATE_CREATING, self.STATE_AVAILABLE]: wait_for( lambda: self.get_file_system_state(name), - self.STATE_AVAILABLE + self.STATE_AVAILABLE, ) if not file_system_id: file_system_id = self.get_file_system_id(name) @@ -612,27 +619,27 @@ def delete_file_system(self, name, file_system_id=None): wait_for( lambda: self.get_file_system_state(name), self.STATE_DELETED, - self.wait_timeout + self.wait_timeout, ) return result def delete_mount_targets(self, file_system_id): """ - Removes mount targets by EFS id + Removes mount targets by EFS id """ wait_for( lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)), - 0 + 0, ) targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE) for target in targets: - self.connection.delete_mount_target(MountTargetId=target['MountTargetId']) + self.connection.delete_mount_target(MountTargetId=target["MountTargetId"]) wait_for( lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)), - 0 + 0, ) return len(targets) > 0 @@ -640,7 +647,7 @@ def delete_mount_targets(self, file_system_id): def iterate_all(attr, map_method, **kwargs): """ - Method creates iterator from result set + Method creates iterator from result set """ args = dict((key, value) for (key, value) in kwargs.items() if value is not None) wait = 1 @@ -649,11 +656,11 @@ def iterate_all(attr, map_method, **kwargs): data = map_method(**args) for elm in data[attr]: yield elm - if 'NextMarker' in data: - args['Marker'] = data['Nextmarker'] + if "NextMarker" in data: + args["Marker"] = data["Nextmarker"] continue break - except is_boto3_error_code('ThrottlingException'): + except is_boto3_error_code("ThrottlingException"): if wait < 600: sleep(wait) wait = wait * 2 @@ -664,7 +671,7 @@ def iterate_all(attr, map_method, **kwargs): def targets_equal(keys, a, b): """ - Method compare two mount targets by specified attributes + Method compare two mount targets by specified attributes """ for key in keys: if key in b and a[key] != b[key]: @@ -675,7 +682,7 @@ def targets_equal(keys, a, b): def dict_diff(dict1, dict2, by_key=False): """ - Helper method to calculate difference of two dictionaries + Helper method to calculate difference of two dictionaries """ keys1 = set(dict1.keys() if by_key else dict1.items()) keys2 = set(dict2.keys() if by_key else dict2.items()) @@ -687,7 +694,7 @@ def dict_diff(dict1, dict2, by_key=False): def first_or_default(items, default=None): """ - Helper method to fetch first element of list (if exists) + Helper method to fetch first element of list (if exists) """ for item in items: return item @@ -696,13 +703,13 @@ def first_or_default(items, default=None): def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS): """ - Helper method to wait for desired value returned by callback method + Helper method to wait for desired value returned by callback method """ wait_start = timestamp() while True: if callback() != value: if timeout != 0 and (timestamp() - wait_start > timeout): - raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)') + raise RuntimeError("Wait timeout exceeded (" + str(timeout) + " sec)") else: sleep(5) continue @@ -711,67 +718,82 @@ def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS def main(): """ - Module action handler + Module action handler """ argument_spec = dict( encrypt=dict(required=False, type="bool", default=False), - state=dict(required=False, type='str', choices=["present", "absent"], default="present"), - kms_key_id=dict(required=False, type='str', default=None), - purge_tags=dict(default=True, type='bool'), - id=dict(required=False, type='str', default=None), - name=dict(required=False, type='str', default=None), - tags=dict(required=False, type="dict", aliases=['resource_tags']), - targets=dict(required=False, type="list", default=[], elements='dict'), - performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"), - transition_to_ia=dict(required=False, type='str', choices=["None", "7", "14", "30", "60", "90"], default=None), - throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None), - provisioned_throughput_in_mibps=dict(required=False, type='float'), + state=dict(required=False, type="str", choices=["present", "absent"], default="present"), + kms_key_id=dict(required=False, type="str", default=None), + purge_tags=dict(default=True, type="bool"), + id=dict(required=False, type="str", default=None), + name=dict(required=False, type="str", default=None), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + targets=dict(required=False, type="list", default=[], elements="dict"), + performance_mode=dict( + required=False, type="str", choices=["general_purpose", "max_io"], default="general_purpose" + ), + transition_to_ia=dict(required=False, type="str", choices=["None", "7", "14", "30", "60", "90"], default=None), + throughput_mode=dict(required=False, type="str", choices=["bursting", "provisioned"], default=None), + provisioned_throughput_in_mibps=dict(required=False, type="float"), wait=dict(required=False, type="bool", default=False), - wait_timeout=dict(required=False, type="int", default=0) + wait_timeout=dict(required=False, type="int", default=0), ) module = AnsibleAWSModule(argument_spec=argument_spec) connection = EFSConnection(module) - name = module.params.get('name') - fs_id = module.params.get('id') - tags = module.params.get('tags') + name = module.params.get("name") + fs_id = module.params.get("id") + tags = module.params.get("tags") target_translations = { - 'ip_address': 'IpAddress', - 'security_groups': 'SecurityGroups', - 'subnet_id': 'SubnetId' + "ip_address": "IpAddress", + "security_groups": "SecurityGroups", + "subnet_id": "SubnetId", } - targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')] + targets = [ + dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get("targets") + ] performance_mode_translations = { - 'general_purpose': 'generalPurpose', - 'max_io': 'maxIO' + "general_purpose": "generalPurpose", + "max_io": "maxIO", } - encrypt = module.params.get('encrypt') - kms_key_id = module.params.get('kms_key_id') - performance_mode = performance_mode_translations[module.params.get('performance_mode')] - purge_tags = module.params.get('purge_tags') - transition_to_ia = module.params.get('transition_to_ia') - throughput_mode = module.params.get('throughput_mode') - provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps') - state = str(module.params.get('state')).lower() + encrypt = module.params.get("encrypt") + kms_key_id = module.params.get("kms_key_id") + performance_mode = performance_mode_translations[module.params.get("performance_mode")] + purge_tags = module.params.get("purge_tags") + transition_to_ia = module.params.get("transition_to_ia") + throughput_mode = module.params.get("throughput_mode") + provisioned_throughput_in_mibps = module.params.get("provisioned_throughput_in_mibps") + state = str(module.params.get("state")).lower() changed = False - if state == 'present': + if state == "present": if not name: - module.fail_json(msg='Name parameter is required for create') + module.fail_json(msg="Name parameter is required for create") - changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps) + changed = connection.create_file_system( + name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps + ) changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed - changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets, - throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed + changed = ( + connection.converge_file_system( + name=name, + tags=tags, + purge_tags=purge_tags, + targets=targets, + throughput_mode=throughput_mode, + provisioned_throughput_in_mibps=provisioned_throughput_in_mibps, + ) + or changed + ) if transition_to_ia: changed |= connection.update_lifecycle_policy(name, transition_to_ia) result = first_or_default(connection.get_file_systems(CreationToken=name)) - elif state == 'absent': + elif state == "absent": if not name and not fs_id: - module.fail_json(msg='Either name or id parameter is required for delete') + module.fail_json(msg="Either name or id parameter is required for delete") changed = connection.delete_file_system(name, fs_id) result = None @@ -780,5 +802,5 @@ def main(): module.exit_json(changed=changed, efs=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/efs_info.py b/plugins/modules/efs_info.py index e73042555bb..533af10d84d 100644 --- a/plugins/modules/efs_info.py +++ b/plugins/modules/efs_info.py @@ -185,84 +185,86 @@ class EFSConnection(object): - STATE_CREATING = 'creating' - STATE_AVAILABLE = 'available' - STATE_DELETING = 'deleting' - STATE_DELETED = 'deleted' + STATE_CREATING = "creating" + STATE_AVAILABLE = "available" + STATE_DELETING = "deleting" + STATE_DELETED = "deleted" def __init__(self, module): try: - self.connection = module.client('efs') + self.connection = module.client("efs") self.module = module except Exception as e: module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e)) self.region = module.region - @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException']) + @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"]) def list_file_systems(self, **kwargs): """ Returns generator of file systems including all attributes of FS """ - paginator = self.connection.get_paginator('describe_file_systems') - return paginator.paginate(**kwargs).build_full_result()['FileSystems'] + paginator = self.connection.get_paginator("describe_file_systems") + return paginator.paginate(**kwargs).build_full_result()["FileSystems"] - @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException']) + @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"]) def get_tags(self, file_system_id): """ Returns tag list for selected instance of EFS """ - paginator = self.connection.get_paginator('describe_tags') - return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags']) + paginator = self.connection.get_paginator("describe_tags") + return boto3_tag_list_to_ansible_dict( + paginator.paginate(FileSystemId=file_system_id).build_full_result()["Tags"] + ) - @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException']) + @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"]) def get_mount_targets(self, file_system_id): """ Returns mount targets for selected instance of EFS """ - paginator = self.connection.get_paginator('describe_mount_targets') - return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets'] + paginator = self.connection.get_paginator("describe_mount_targets") + return paginator.paginate(FileSystemId=file_system_id).build_full_result()["MountTargets"] - @AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException']) + @AWSRetry.jittered_backoff(catch_extra_error_codes=["ThrottlingException"]) def get_security_groups(self, mount_target_id): """ Returns security groups for selected instance of EFS """ - return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups'] + return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)["SecurityGroups"] def get_mount_targets_data(self, file_systems): for item in file_systems: - if item['life_cycle_state'] == self.STATE_AVAILABLE: + if item["life_cycle_state"] == self.STATE_AVAILABLE: try: - mount_targets = self.get_mount_targets(item['file_system_id']) + mount_targets = self.get_mount_targets(item["file_system_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get EFS targets") for mt in mount_targets: - item['mount_targets'].append(camel_dict_to_snake_dict(mt)) + item["mount_targets"].append(camel_dict_to_snake_dict(mt)) return file_systems def get_security_groups_data(self, file_systems): for item in file_systems: - if item['life_cycle_state'] == self.STATE_AVAILABLE: - for target in item['mount_targets']: - if target['life_cycle_state'] == self.STATE_AVAILABLE: + if item["life_cycle_state"] == self.STATE_AVAILABLE: + for target in item["mount_targets"]: + if target["life_cycle_state"] == self.STATE_AVAILABLE: try: - target['security_groups'] = self.get_security_groups(target['mount_target_id']) + target["security_groups"] = self.get_security_groups(target["mount_target_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get EFS security groups") else: - target['security_groups'] = [] + target["security_groups"] = [] else: - item['tags'] = {} - item['mount_targets'] = [] + item["tags"] = {} + item["mount_targets"] = [] return file_systems def get_file_systems(self, file_system_id=None, creation_token=None): kwargs = dict() if file_system_id: - kwargs['FileSystemId'] = file_system_id + kwargs["FileSystemId"] = file_system_id if creation_token: - kwargs['CreationToken'] = creation_token + kwargs["CreationToken"] = creation_token try: file_systems = self.list_file_systems(**kwargs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -270,7 +272,7 @@ def get_file_systems(self, file_system_id=None, creation_token=None): results = list() for item in file_systems: - item['CreationTime'] = str(item['CreationTime']) + item["CreationTime"] = str(item["CreationTime"]) """ In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose @@ -278,18 +280,18 @@ def get_file_systems(self, file_system_id=None, creation_token=None): AWS documentation is available here: U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html) """ - item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) - item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) + item["MountPoint"] = ".%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) + item["FilesystemAddress"] = "%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) - if 'Timestamp' in item['SizeInBytes']: - item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp']) + if "Timestamp" in item["SizeInBytes"]: + item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) result = camel_dict_to_snake_dict(item) - result['tags'] = {} - result['mount_targets'] = [] + result["tags"] = {} + result["mount_targets"] = [] # Set tags *after* doing camel to snake - if result['life_cycle_state'] == self.STATE_AVAILABLE: + if result["life_cycle_state"] == self.STATE_AVAILABLE: try: - result['tags'] = self.get_tags(result['file_system_id']) + result["tags"] = self.get_tags(result["file_system_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get EFS tags") results.append(result) @@ -301,13 +303,14 @@ def prefix_to_attr(attr_id): Helper method to convert ID prefix to mount target attribute """ attr_by_prefix = { - 'fsmt-': 'mount_target_id', - 'subnet-': 'subnet_id', - 'eni-': 'network_interface_id', - 'sg-': 'security_groups' + "fsmt-": "mount_target_id", + "subnet-": "subnet_id", + "eni-": "network_interface_id", + "sg-": "security_groups", } - return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items() - if str(attr_id).startswith(prefix)], 'ip_address') + return first_or_default( + [attr_name for (prefix, attr_name) in attr_by_prefix.items() if str(attr_id).startswith(prefix)], "ip_address" + ) def first_or_default(items, default=None): @@ -334,7 +337,7 @@ def has_targets(available, required): Helper method to determine if mount target requested already exists """ grouped = group_list_of_dict(available) - for (value, field) in required: + for value, field in required: if field not in grouped or value not in grouped[field]: return False return True @@ -357,35 +360,34 @@ def main(): """ argument_spec = dict( id=dict(), - name=dict(aliases=['creation_token']), + name=dict(aliases=["creation_token"]), tags=dict(type="dict", default={}), - targets=dict(type="list", default=[], elements='str') + targets=dict(type="list", default=[], elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) connection = EFSConnection(module) - name = module.params.get('name') - fs_id = module.params.get('id') - tags = module.params.get('tags') - targets = module.params.get('targets') + name = module.params.get("name") + fs_id = module.params.get("id") + tags = module.params.get("tags") + targets = module.params.get("targets") file_systems_info = connection.get_file_systems(fs_id, name) if tags: - file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)] + file_systems_info = [item for item in file_systems_info if has_tags(item["tags"], tags)] file_systems_info = connection.get_mount_targets_data(file_systems_info) file_systems_info = connection.get_security_groups_data(file_systems_info) if targets: targets = [(item, prefix_to_attr(item)) for item in targets] - file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)] + file_systems_info = [item for item in file_systems_info if has_targets(item["mount_targets"], targets)] module.exit_json(changed=False, efs=file_systems_info) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/efs_tag.py b/plugins/modules/efs_tag.py index 10978c5bf2f..80eb5cc7b9c 100644 --- a/plugins/modules/efs_tag.py +++ b/plugins/modules/efs_tag.py @@ -112,35 +112,35 @@ def get_tags(efs, module, resource): - ''' + """ Get resource tags - ''' + """ try: - return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)['Tags']) + return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)["Tags"]) except (BotoCoreError, ClientError) as get_tags_error: - module.fail_json_aws(get_tags_error, msg='Failed to fetch tags for resource {0}'.format(resource)) + module.fail_json_aws(get_tags_error, msg="Failed to fetch tags for resource {0}".format(resource)) def main(): - ''' + """ MAIN - ''' + """ argument_spec = dict( resource=dict(required=True), - tags=dict(type='dict', required=True, aliases=['resource_tags']), - purge_tags=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']) + tags=dict(type="dict", required=True, aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - resource = module.params['resource'] - tags = module.params['tags'] - state = module.params['state'] - purge_tags = module.params['purge_tags'] + resource = module.params["resource"] + tags = module.params["tags"] + state = module.params["state"] + purge_tags = module.params["purge_tags"] - result = {'changed': False} + result = {"changed": False} - efs = module.client('efs', retry_decorator=AWSRetry.jittered_backoff()) + efs = module.client("efs", retry_decorator=AWSRetry.jittered_backoff()) current_tags = get_tags(efs, module, resource) @@ -148,7 +148,7 @@ def main(): remove_tags = {} - if state == 'absent': + if state == "absent": for key in tags: if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): remove_tags[key] = current_tags[key] @@ -157,28 +157,32 @@ def main(): remove_tags[key] = current_tags[key] if remove_tags: - result['changed'] = True - result['removed_tags'] = remove_tags + result["changed"] = True + result["removed_tags"] = remove_tags if not module.check_mode: try: efs.untag_resource(aws_retry=True, ResourceId=resource, TagKeys=list(remove_tags.keys())) except (BotoCoreError, ClientError) as remove_tag_error: - module.fail_json_aws(remove_tag_error, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource)) + module.fail_json_aws( + remove_tag_error, msg="Failed to remove tags {0} from resource {1}".format(remove_tags, resource) + ) - if state == 'present' and add_tags: - result['changed'] = True - result['added_tags'] = add_tags + if state == "present" and add_tags: + result["changed"] = True + result["added_tags"] = add_tags current_tags.update(add_tags) if not module.check_mode: try: tags = ansible_dict_to_boto3_tag_list(add_tags) efs.tag_resource(aws_retry=True, ResourceId=resource, Tags=tags) except (BotoCoreError, ClientError) as set_tag_error: - module.fail_json_aws(set_tag_error, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource)) + module.fail_json_aws( + set_tag_error, msg="Failed to set tags {0} on resource {1}".format(add_tags, resource) + ) - result['tags'] = get_tags(efs, module, resource) + result["tags"] = get_tags(efs, module, resource) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/eks_cluster.py b/plugins/modules/eks_cluster.py index 699c74bdb5b..13ea5997d4d 100644 --- a/plugins/modules/eks_cluster.py +++ b/plugins/modules/eks_cluster.py @@ -177,28 +177,28 @@ def ensure_present(client, module): - name = module.params.get('name') - subnets = module.params['subnets'] - groups = module.params['security_groups'] - wait = module.params.get('wait') + name = module.params.get("name") + subnets = module.params["subnets"] + groups = module.params["security_groups"] + wait = module.params.get("wait") cluster = get_cluster(client, module) try: - ec2 = module.client('ec2') - vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])['Subnets'][0]['VpcId'] + ec2 = module.client("ec2") + vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])["Subnets"][0]["VpcId"] groups = get_ec2_security_group_ids_from_names(groups, ec2, vpc_id) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't lookup security groups") if cluster: - if set(cluster['resourcesVpcConfig']['subnetIds']) != set(subnets): + if set(cluster["resourcesVpcConfig"]["subnetIds"]) != set(subnets): module.fail_json(msg="Cannot modify subnets of existing cluster") - if set(cluster['resourcesVpcConfig']['securityGroupIds']) != set(groups): + if set(cluster["resourcesVpcConfig"]["securityGroupIds"]) != set(groups): module.fail_json(msg="Cannot modify security groups of existing cluster") - if module.params.get('version') and module.params.get('version') != cluster['version']: + if module.params.get("version") and module.params.get("version") != cluster["version"]: module.fail_json(msg="Cannot modify version of existing cluster") if wait: - wait_until(client, module, 'cluster_active') + wait_until(client, module, "cluster_active") # Ensure that fields that are only available for active clusters are # included in the returned value cluster = get_cluster(client, module) @@ -208,24 +208,23 @@ def ensure_present(client, module): if module.check_mode: module.exit_json(changed=True) try: - params = dict(name=name, - roleArn=module.params['role_arn'], - resourcesVpcConfig=dict( - subnetIds=subnets, - securityGroupIds=groups), - ) - if module.params['version']: - params['version'] = module.params['version'] - if module.params['tags']: - params['tags'] = module.params['tags'] - cluster = client.create_cluster(**params)['cluster'] + params = dict( + name=name, + roleArn=module.params["role_arn"], + resourcesVpcConfig=dict(subnetIds=subnets, securityGroupIds=groups), + ) + if module.params["version"]: + params["version"] = module.params["version"] + if module.params["tags"]: + params["tags"] = module.params["tags"] + cluster = client.create_cluster(**params)["cluster"] except botocore.exceptions.EndpointConnectionError as e: module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't create cluster %s" % name) if wait: - wait_until(client, module, 'cluster_active') + wait_until(client, module, "cluster_active") # Ensure that fields that are only available for active clusters are # included in the returned value cluster = get_cluster(client, module) @@ -234,44 +233,47 @@ def ensure_present(client, module): def ensure_absent(client, module): - name = module.params.get('name') + name = module.params.get("name") existing = get_cluster(client, module) - wait = module.params.get('wait') + wait = module.params.get("wait") if not existing: module.exit_json(changed=False) if not module.check_mode: try: - client.delete_cluster(name=module.params['name']) + client.delete_cluster(name=module.params["name"]) except botocore.exceptions.EndpointConnectionError as e: module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name) if wait: - wait_until(client, module, 'cluster_deleted') + wait_until(client, module, "cluster_deleted") module.exit_json(changed=True) def get_cluster(client, module): - name = module.params.get('name') + name = module.params.get("name") try: - return client.describe_cluster(name=name)['cluster'] - except is_boto3_error_code('ResourceNotFoundException'): + return client.describe_cluster(name=name)["cluster"] + except is_boto3_error_code("ResourceNotFoundException"): return None except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get cluster %s" % name) -def wait_until(client, module, waiter_name='cluster_active'): - name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') +def wait_until(client, module, waiter_name="cluster_active"): + name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") waiter = get_waiter(client, waiter_name) attempts = 1 + int(wait_timeout / waiter.config.delay) - waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts}) + waiter.wait(name=name, WaiterConfig={"MaxAttempts": attempts}) def main(): @@ -279,27 +281,27 @@ def main(): name=dict(required=True), version=dict(), role_arn=dict(), - subnets=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - state=dict(choices=['absent', 'present'], default='present'), - tags=dict(type='dict', required=False), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=1200, type='int') + subnets=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + state=dict(choices=["absent", "present"], default="present"), + tags=dict(type="dict", required=False), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=1200, type="int"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['role_arn', 'subnets', 'security_groups']]], + required_if=[["state", "present", ["role_arn", "subnets", "security_groups"]]], supports_check_mode=True, ) - client = module.client('eks') + client = module.client("eks") - if module.params.get('state') == 'present': + if module.params.get("state") == "present": ensure_present(client, module) else: ensure_absent(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/eks_fargate_profile.py b/plugins/modules/eks_fargate_profile.py index abc4dd09f90..71a632a2223 100644 --- a/plugins/modules/eks_fargate_profile.py +++ b/plugins/modules/eks_fargate_profile.py @@ -180,58 +180,58 @@ def validate_tags(client, module, fargate_profile): changed = False - desired_tags = module.params.get('tags') + desired_tags = module.params.get("tags") if desired_tags is None: return False try: - existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn'])['tags'] - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags')) + existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile["fargateProfileArn"])["tags"] + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to list or compare tags for Fargate Profile %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to list or compare tags for Fargate Profile %s" % module.params.get("name")) if tags_to_remove: changed = True if not module.check_mode: try: - client.untag_resource(resourceArn=fargate_profile['fargateProfileArn'], tagKeys=tags_to_remove) + client.untag_resource(resourceArn=fargate_profile["fargateProfileArn"], tagKeys=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Fargate Profile %s" % module.params.get("name")) if tags_to_add: changed = True if not module.check_mode: try: - client.tag_resource(resourceArn=fargate_profile['fargateProfileArn'], tags=tags_to_add) + client.tag_resource(resourceArn=fargate_profile["fargateProfileArn"], tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Fargate Profile %s" % module.params.get("name")) return changed def create_or_update_fargate_profile(client, module): - name = module.params.get('name') - subnets = module.params['subnets'] - role_arn = module.params['role_arn'] - cluster_name = module.params['cluster_name'] - selectors = module.params['selectors'] - tags = module.params['tags'] or {} - wait = module.params.get('wait') + name = module.params.get("name") + subnets = module.params["subnets"] + role_arn = module.params["role_arn"] + cluster_name = module.params["cluster_name"] + selectors = module.params["selectors"] + tags = module.params["tags"] or {} + wait = module.params.get("wait") fargate_profile = get_fargate_profile(client, module, name, cluster_name) if fargate_profile: changed = False - if set(fargate_profile['podExecutionRoleArn']) != set(role_arn): + if set(fargate_profile["podExecutionRoleArn"]) != set(role_arn): module.fail_json(msg="Cannot modify Execution Role") - if set(fargate_profile['subnets']) != set(subnets): + if set(fargate_profile["subnets"]) != set(subnets): module.fail_json(msg="Cannot modify Subnets") - if fargate_profile['selectors'] != selectors: + if fargate_profile["selectors"] != selectors: module.fail_json(msg="Cannot modify Selectors") changed = validate_tags(client, module, fargate_profile) if wait: - wait_until(client, module, 'fargate_profile_active', name, cluster_name) + wait_until(client, module, "fargate_profile_active", name, cluster_name) fargate_profile = get_fargate_profile(client, module, name, cluster_name) module.exit_json(changed=changed, **camel_dict_to_snake_dict(fargate_profile)) @@ -242,29 +242,30 @@ def create_or_update_fargate_profile(client, module): check_profiles_status(client, module, cluster_name) try: - params = dict(fargateProfileName=name, - podExecutionRoleArn=role_arn, - subnets=subnets, - clusterName=cluster_name, - selectors=selectors, - tags=tags - ) + params = dict( + fargateProfileName=name, + podExecutionRoleArn=role_arn, + subnets=subnets, + clusterName=cluster_name, + selectors=selectors, + tags=tags, + ) fargate_profile = client.create_fargate_profile(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't create fargate profile %s" % name) if wait: - wait_until(client, module, 'fargate_profile_active', name, cluster_name) + wait_until(client, module, "fargate_profile_active", name, cluster_name) fargate_profile = get_fargate_profile(client, module, name, cluster_name) module.exit_json(changed=True, **camel_dict_to_snake_dict(fargate_profile)) def delete_fargate_profile(client, module): - name = module.params.get('name') - cluster_name = module.params['cluster_name'] + name = module.params.get("name") + cluster_name = module.params["cluster_name"] existing = get_fargate_profile(client, module, name, cluster_name) - wait = module.params.get('wait') + wait = module.params.get("wait") if not existing or existing["status"] == "DELETING": module.exit_json(changed=False) @@ -276,17 +277,20 @@ def delete_fargate_profile(client, module): module.fail_json_aws(e, msg="Couldn't delete fargate profile %s" % name) if wait: - wait_until(client, module, 'fargate_profile_deleted', name, cluster_name) + wait_until(client, module, "fargate_profile_deleted", name, cluster_name) module.exit_json(changed=True) def get_fargate_profile(client, module, name, cluster_name): try: - return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)['fargateProfile'] - except is_boto3_error_code('ResourceNotFoundException'): + return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)["fargateProfile"] + except is_boto3_error_code("ResourceNotFoundException"): return None - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get fargate profile") @@ -297,20 +301,24 @@ def check_profiles_status(client, module, cluster_name): for name in list_profiles["fargateProfileNames"]: fargate_profile = get_fargate_profile(client, module, name, cluster_name) - if fargate_profile["status"] == 'CREATING': - wait_until(client, module, 'fargate_profile_active', fargate_profile["fargateProfileName"], cluster_name) - elif fargate_profile["status"] == 'DELETING': - wait_until(client, module, 'fargate_profile_deleted', fargate_profile["fargateProfileName"], cluster_name) + if fargate_profile["status"] == "CREATING": + wait_until( + client, module, "fargate_profile_active", fargate_profile["fargateProfileName"], cluster_name + ) + elif fargate_profile["status"] == "DELETING": + wait_until( + client, module, "fargate_profile_deleted", fargate_profile["fargateProfileName"], cluster_name + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't not find EKS cluster") def wait_until(client, module, waiter_name, name, cluster_name): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") waiter = get_waiter(client, waiter_name) attempts = 1 + int(wait_timeout / waiter.config.delay) try: - waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={'MaxAttempts': attempts}) + waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={"MaxAttempts": attempts}) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="An error occurred waiting") @@ -320,34 +328,38 @@ def main(): name=dict(required=True), cluster_name=dict(required=True), role_arn=dict(), - subnets=dict(type='list', elements='str'), - selectors=dict(type='list', elements='dict', options=dict( - namespace=dict(type='str'), - labels=dict(type='dict', default={}) - )), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - state=dict(choices=['absent', 'present'], default='present'), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=1200, type='int') + subnets=dict(type="list", elements="str"), + selectors=dict( + type="list", + elements="dict", + options=dict( + namespace=dict(type="str"), + labels=dict(type="dict", default={}), + ), + ), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + state=dict(choices=["absent", "present"], default="present"), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=1200, type="int"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['role_arn', 'subnets', 'selectors']]], + required_if=[["state", "present", ["role_arn", "subnets", "selectors"]]], supports_check_mode=True, ) try: - client = module.client('eks') + client = module.client("eks") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't connect to AWS") - if module.params.get('state') == 'present': + if module.params.get("state") == "present": create_or_update_fargate_profile(client, module) else: delete_fargate_profile(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/eks_nodegroup.py b/plugins/modules/eks_nodegroup.py index f6ce192c96c..6704af1af09 100644 --- a/plugins/modules/eks_nodegroup.py +++ b/plugins/modules/eks_nodegroup.py @@ -362,29 +362,29 @@ def validate_tags(client, module, nodegroup): changed = False - desired_tags = module.params.get('tags') + desired_tags = module.params.get("tags") if desired_tags is None: return False try: - existing_tags = client.list_tags_for_resource(resourceArn=nodegroup['nodegroupArn'])['tags'] - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags')) + existing_tags = client.list_tags_for_resource(resourceArn=nodegroup["nodegroupArn"])["tags"] + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to list or compare tags for Nodegroup %s.' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to list or compare tags for Nodegroup %s." % module.params.get("name")) if tags_to_remove: if not module.check_mode: changed = True try: - client.untag_resource(aws_retry=True, ResourceArn=nodegroup['nodegroupArn'], tagKeys=tags_to_remove) + client.untag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tagKeys=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Nodegroup %s." % module.params.get("name")) if tags_to_add: if not module.check_mode: changed = True try: - client.tag_resource(aws_retry=True, ResourceArn=nodegroup['nodegroupArn'], tags=tags_to_add) + client.tag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Nodegroup %s." % module.params.get("name")) return changed @@ -405,24 +405,24 @@ def compare_taints(nodegroup_taints, param_taints): def validate_taints(client, module, nodegroup, param_taints): changed = False params = dict() - params['clusterName'] = nodegroup['clusterName'] - params['nodegroupName'] = nodegroup['nodegroupName'] - params['taints'] = [] - if 'taints' not in nodegroup: - nodegroup['taints'] = [] - taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup['taints'], param_taints) + params["clusterName"] = nodegroup["clusterName"] + params["nodegroupName"] = nodegroup["nodegroupName"] + params["taints"] = [] + if "taints" not in nodegroup: + nodegroup["taints"] = [] + taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup["taints"], param_taints) if taints_to_add_or_update: - params['taints']['addOrUpdateTaints'] = taints_to_add_or_update + params["taints"]["addOrUpdateTaints"] = taints_to_add_or_update if taints_to_unset: - params['taints']['removeTaints'] = taints_to_unset - if params['taints']: + params["taints"]["removeTaints"] = taints_to_unset + if params["taints"]: if not module.check_mode: changed = True try: client.update_nodegroup_config(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set taints for Nodegroup %s.' % params['nodegroupName']) + module.fail_json_aws(e, msg="Unable to set taints for Nodegroup %s." % params["nodegroupName"]) return changed @@ -443,109 +443,114 @@ def compare_labels(nodegroup_labels, param_labels): def validate_labels(client, module, nodegroup, param_labels): changed = False params = dict() - params['clusterName'] = nodegroup['clusterName'] - params['nodegroupName'] = nodegroup['nodegroupName'] - params['labels'] = {} - labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup['labels'], param_labels) + params["clusterName"] = nodegroup["clusterName"] + params["nodegroupName"] = nodegroup["nodegroupName"] + params["labels"] = {} + labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup["labels"], param_labels) if labels_to_add_or_update: - params['labels']['addOrUpdateLabels'] = labels_to_add_or_update + params["labels"]["addOrUpdateLabels"] = labels_to_add_or_update if labels_to_unset: - params['labels']['removeLabels'] = labels_to_unset - if params['labels']: + params["labels"]["removeLabels"] = labels_to_unset + if params["labels"]: if not module.check_mode: changed = True try: client.update_nodegroup_config(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set labels for Nodegroup %s.' % params['nodegroupName']) + module.fail_json_aws(e, msg="Unable to set labels for Nodegroup %s." % params["nodegroupName"]) return changed def compare_params(module, params, nodegroup): - for param in ['nodeRole', 'subnets', 'diskSize', 'instanceTypes', 'amiTypes', 'remoteAccess', 'capacityType']: + for param in ["nodeRole", "subnets", "diskSize", "instanceTypes", "amiTypes", "remoteAccess", "capacityType"]: if (param in nodegroup) and (param in params): - if (nodegroup[param] != params[param]): + if nodegroup[param] != params[param]: module.fail_json(msg="Cannot modify parameter %s." % param) - if ('launchTemplate' not in nodegroup) and ('launchTemplate' in params): + if ("launchTemplate" not in nodegroup) and ("launchTemplate" in params): module.fail_json(msg="Cannot add Launch Template in this Nodegroup.") - if nodegroup['updateConfig'] != params['updateConfig']: + if nodegroup["updateConfig"] != params["updateConfig"]: return True - if nodegroup['scalingConfig'] != params['scalingConfig']: + if nodegroup["scalingConfig"] != params["scalingConfig"]: return True return False def compare_params_launch_template(module, params, nodegroup): - if 'launchTemplate' not in params: + if "launchTemplate" not in params: module.fail_json(msg="Cannot exclude Launch Template in this Nodegroup.") else: - for key in ['name', 'id']: - if (key in params['launchTemplate']) and (params['launchTemplate'][key] != nodegroup['launchTemplate'][key]): + for key in ["name", "id"]: + if (key in params["launchTemplate"]) and ( + params["launchTemplate"][key] != nodegroup["launchTemplate"][key] + ): module.fail_json(msg="Cannot modify Launch Template %s." % key) - if ('version' in params['launchTemplate']) and (params['launchTemplate']['version'] != nodegroup['launchTemplate']['version']): + if ("version" in params["launchTemplate"]) and ( + params["launchTemplate"]["version"] != nodegroup["launchTemplate"]["version"] + ): return True return False def create_or_update_nodegroups(client, module): - changed = False params = dict() - params['nodegroupName'] = module.params['name'] - params['clusterName'] = module.params['cluster_name'] - params['nodeRole'] = module.params['node_role'] - params['subnets'] = module.params['subnets'] - params['tags'] = module.params['tags'] or {} - if module.params['ami_type'] is not None: - params['amiType'] = module.params['ami_type'] - if module.params['disk_size'] is not None: - params['diskSize'] = module.params['disk_size'] - if module.params['instance_types'] is not None: - params['instanceTypes'] = module.params['instance_types'] - if module.params['launch_template'] is not None: - params['launchTemplate'] = dict() - if module.params['launch_template']['id'] is not None: - params['launchTemplate']['id'] = module.params['launch_template']['id'] - if module.params['launch_template']['version'] is not None: - params['launchTemplate']['version'] = module.params['launch_template']['version'] - if module.params['launch_template']['name'] is not None: - params['launchTemplate']['name'] = module.params['launch_template']['name'] - if module.params['release_version'] is not None: - params['releaseVersion'] = module.params['release_version'] - if module.params['remote_access'] is not None: - params['remoteAccess'] = dict() - if module.params['remote_access']['ec2_ssh_key'] is not None: - params['remoteAccess']['ec2SshKey'] = module.params['remote_access']['ec2_ssh_key'] - if module.params['remote_access']['source_sg'] is not None: - params['remoteAccess']['sourceSecurityGroups'] = module.params['remote_access']['source_sg'] - if module.params['capacity_type'] is not None: - params['capacityType'] = module.params['capacity_type'].upper() - if module.params['labels'] is not None: - params['labels'] = module.params['labels'] - if module.params['taints'] is not None: - params['taints'] = module.params['taints'] - if module.params['update_config'] is not None: - params['updateConfig'] = dict() - if module.params['update_config']['max_unavailable'] is not None: - params['updateConfig']['maxUnavailable'] = module.params['update_config']['max_unavailable'] - if module.params['update_config']['max_unavailable_percentage'] is not None: - params['updateConfig']['maxUnavailablePercentage'] = module.params['update_config']['max_unavailable_percentage'] - if module.params['scaling_config'] is not None: - params['scalingConfig'] = snake_dict_to_camel_dict(module.params['scaling_config']) - - wait = module.params.get('wait') - nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + params["nodegroupName"] = module.params["name"] + params["clusterName"] = module.params["cluster_name"] + params["nodeRole"] = module.params["node_role"] + params["subnets"] = module.params["subnets"] + params["tags"] = module.params["tags"] or {} + if module.params["ami_type"] is not None: + params["amiType"] = module.params["ami_type"] + if module.params["disk_size"] is not None: + params["diskSize"] = module.params["disk_size"] + if module.params["instance_types"] is not None: + params["instanceTypes"] = module.params["instance_types"] + if module.params["launch_template"] is not None: + params["launchTemplate"] = dict() + if module.params["launch_template"]["id"] is not None: + params["launchTemplate"]["id"] = module.params["launch_template"]["id"] + if module.params["launch_template"]["version"] is not None: + params["launchTemplate"]["version"] = module.params["launch_template"]["version"] + if module.params["launch_template"]["name"] is not None: + params["launchTemplate"]["name"] = module.params["launch_template"]["name"] + if module.params["release_version"] is not None: + params["releaseVersion"] = module.params["release_version"] + if module.params["remote_access"] is not None: + params["remoteAccess"] = dict() + if module.params["remote_access"]["ec2_ssh_key"] is not None: + params["remoteAccess"]["ec2SshKey"] = module.params["remote_access"]["ec2_ssh_key"] + if module.params["remote_access"]["source_sg"] is not None: + params["remoteAccess"]["sourceSecurityGroups"] = module.params["remote_access"]["source_sg"] + if module.params["capacity_type"] is not None: + params["capacityType"] = module.params["capacity_type"].upper() + if module.params["labels"] is not None: + params["labels"] = module.params["labels"] + if module.params["taints"] is not None: + params["taints"] = module.params["taints"] + if module.params["update_config"] is not None: + params["updateConfig"] = dict() + if module.params["update_config"]["max_unavailable"] is not None: + params["updateConfig"]["maxUnavailable"] = module.params["update_config"]["max_unavailable"] + if module.params["update_config"]["max_unavailable_percentage"] is not None: + params["updateConfig"]["maxUnavailablePercentage"] = module.params["update_config"][ + "max_unavailable_percentage" + ] + if module.params["scaling_config"] is not None: + params["scalingConfig"] = snake_dict_to_camel_dict(module.params["scaling_config"]) + + wait = module.params.get("wait") + nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) if nodegroup: update_params = dict() - update_params['clusterName'] = params['clusterName'] - update_params['nodegroupName'] = params['nodegroupName'] + update_params["clusterName"] = params["clusterName"] + update_params["nodegroupName"] = params["nodegroupName"] - if 'launchTemplate' in nodegroup: + if "launchTemplate" in nodegroup: if compare_params_launch_template(module, params, nodegroup): - update_params['launchTemplate'] = params['launchTemplate'] + update_params["launchTemplate"] = params["launchTemplate"] if not module.check_mode: try: client.update_nodegroup_version(**update_params) @@ -555,10 +560,10 @@ def create_or_update_nodegroups(client, module): if compare_params(module, params, nodegroup): try: - if 'launchTemplate' in update_params: - update_params.pop('launchTemplate') - update_params['scalingConfig'] = params['scalingConfig'] - update_params['updateConfig'] = params['updateConfig'] + if "launchTemplate" in update_params: + update_params.pop("launchTemplate") + update_params["scalingConfig"] = params["scalingConfig"] + update_params["updateConfig"] = params["updateConfig"] if not module.check_mode: client.update_nodegroup_config(**update_params) @@ -570,15 +575,15 @@ def create_or_update_nodegroups(client, module): changed |= validate_tags(client, module, nodegroup) - changed |= validate_labels(client, module, nodegroup, params['labels']) + changed |= validate_labels(client, module, nodegroup, params["labels"]) - if 'taints' in nodegroup: - changed |= validate_taints(client, module, nodegroup, params['taints']) + if "taints" in nodegroup: + changed |= validate_taints(client, module, nodegroup, params["taints"]) if wait: - wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName']) + wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"]) - nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) module.exit_json(changed=changed, **camel_dict_to_snake_dict(nodegroup)) @@ -588,22 +593,22 @@ def create_or_update_nodegroups(client, module): try: nodegroup = client.create_nodegroup(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create Nodegroup %s." % params['nodegroupName']) + module.fail_json_aws(e, msg="Couldn't create Nodegroup %s." % params["nodegroupName"]) if wait: - wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName']) - nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"]) + nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) module.exit_json(changed=True, **camel_dict_to_snake_dict(nodegroup)) def delete_nodegroups(client, module): - name = module.params.get('name') - clusterName = module.params['cluster_name'] + name = module.params.get("name") + clusterName = module.params["cluster_name"] existing = get_nodegroup(client, module, name, clusterName) - wait = module.params.get('wait') - if not existing or existing['status'] == 'DELETING': - module.exit_json(changed=False, msg='Nodegroup not exists or in DELETING status.') + wait = module.params.get("wait") + if not existing or existing["status"] == "DELETING": + module.exit_json(changed=False, msg="Nodegroup not exists or in DELETING status.") if not module.check_mode: try: client.delete_nodegroup(clusterName=clusterName, nodegroupName=name) @@ -611,104 +616,138 @@ def delete_nodegroups(client, module): module.fail_json_aws(e, msg="Couldn't delete Nodegroup %s." % name) if wait: - wait_until(client, module, 'nodegroup_deleted', name, clusterName) + wait_until(client, module, "nodegroup_deleted", name, clusterName) module.exit_json(changed=True) def get_nodegroup(client, module, nodegroup_name, cluster_name): try: - return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)['nodegroup'] - except is_boto3_error_code('ResourceNotFoundException'): + return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)["nodegroup"] + except is_boto3_error_code("ResourceNotFoundException"): return None - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get Nodegroup %s." % nodegroup_name) def wait_until(client, module, waiter_name, nodegroup_name, cluster_name): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") waiter = get_waiter(client, waiter_name) attempts = 1 + int(wait_timeout / waiter.config.delay) try: - waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={'MaxAttempts': attempts}) + waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={"MaxAttempts": attempts}) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="An error occurred waiting") def main(): argument_spec = dict( - name=dict(type='str', required=True), - cluster_name=dict(type='str', required=True), + name=dict(type="str", required=True), + cluster_name=dict(type="str", required=True), node_role=dict(), - subnets=dict(type='list', elements='str'), - scaling_config=dict(type='dict', default={'min_size': 1, 'max_size': 2, 'desired_size': 1}, options=dict( - min_size=dict(type='int'), - max_size=dict(type='int'), - desired_size=dict(type='int') - )), - disk_size=dict(type='int'), - instance_types=dict(type='list', elements='str'), - ami_type=dict(choices=['AL2_x86_64', 'AL2_x86_64_GPU', 'AL2_ARM_64', 'CUSTOM', 'BOTTLEROCKET_ARM_64', 'BOTTLEROCKET_x86_64']), - remote_access=dict(type='dict', options=dict( - ec2_ssh_key=dict(no_log=True), - source_sg=dict(type='list', elements='str') - )), - update_config=dict(type='dict', default={'max_unavailable': 1}, options=dict( - max_unavailable=dict(type='int'), - max_unavailable_percentage=dict(type='int') - )), - labels=dict(type='dict', default={}), - taints=dict(type='list', elements='dict', default=[], options=dict( - key=dict(type='str', no_log=False,), - value=dict(type='str'), - effect=dict(type='str', choices=['NO_SCHEDULE', 'NO_EXECUTE', 'PREFER_NO_SCHEDULE']) - )), - launch_template=dict(type='dict', options=dict( - name=dict(type='str'), - version=dict(type='str'), - id=dict(type='str') - )), - capacity_type=dict(choices=['ON_DEMAND', 'SPOT'], default='ON_DEMAND'), + subnets=dict(type="list", elements="str"), + scaling_config=dict( + type="dict", + default={"min_size": 1, "max_size": 2, "desired_size": 1}, + options=dict( + min_size=dict(type="int"), + max_size=dict(type="int"), + desired_size=dict(type="int"), + ), + ), + disk_size=dict(type="int"), + instance_types=dict(type="list", elements="str"), + ami_type=dict( + choices=[ + "AL2_x86_64", + "AL2_x86_64_GPU", + "AL2_ARM_64", + "CUSTOM", + "BOTTLEROCKET_ARM_64", + "BOTTLEROCKET_x86_64", + ] + ), + remote_access=dict( + type="dict", + options=dict( + ec2_ssh_key=dict(no_log=True), + source_sg=dict(type="list", elements="str"), + ), + ), + update_config=dict( + type="dict", + default={"max_unavailable": 1}, + options=dict( + max_unavailable=dict(type="int"), + max_unavailable_percentage=dict(type="int"), + ), + ), + labels=dict(type="dict", default={}), + taints=dict( + type="list", + elements="dict", + default=[], + options=dict( + key=dict( + type="str", + no_log=False, + ), + value=dict(type="str"), + effect=dict(type="str", choices=["NO_SCHEDULE", "NO_EXECUTE", "PREFER_NO_SCHEDULE"]), + ), + ), + launch_template=dict( + type="dict", + options=dict( + name=dict(type="str"), + version=dict(type="str"), + id=dict(type="str"), + ), + ), + capacity_type=dict(choices=["ON_DEMAND", "SPOT"], default="ON_DEMAND"), release_version=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - state=dict(choices=['absent', 'present'], default='present'), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=1200, type='int') + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + state=dict(choices=["absent", "present"], default="present"), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=1200, type="int"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['node_role', 'subnets']]], + required_if=[["state", "present", ["node_role", "subnets"]]], mutually_exclusive=[ - ('launch_template', 'instance_types'), - ('launch_template', 'disk_size'), - ('launch_template', 'remote_access'), - ('launch_template', 'ami_type') + ("launch_template", "instance_types"), + ("launch_template", "disk_size"), + ("launch_template", "remote_access"), + ("launch_template", "ami_type"), ], supports_check_mode=True, ) - if module.params['launch_template'] is None: - if module.params['disk_size'] is None: - module.params['disk_size'] = 20 - if module.params['ami_type'] is None: - module.params['ami_type'] = "AL2_x86_64" - if module.params['instance_types'] is None: - module.params['instance_types'] = ["t3.medium"] + if module.params["launch_template"] is None: + if module.params["disk_size"] is None: + module.params["disk_size"] = 20 + if module.params["ami_type"] is None: + module.params["ami_type"] = "AL2_x86_64" + if module.params["instance_types"] is None: + module.params["instance_types"] = ["t3.medium"] else: - if (module.params['launch_template']['id'] is None) and (module.params['launch_template']['name'] is None): - module.exit_json(changed=False, msg='To use launch_template, it is necessary to inform the id or name.') + if (module.params["launch_template"]["id"] is None) and (module.params["launch_template"]["name"] is None): + module.exit_json(changed=False, msg="To use launch_template, it is necessary to inform the id or name.") try: - client = module.client('eks') + client = module.client("eks") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't connect to AWS.") - if module.params.get('state') == 'present': + if module.params.get("state") == "present": create_or_update_nodegroups(client, module) else: delete_nodegroups(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elasticache.py b/plugins/modules/elasticache.py index 067134725d7..ac6ea78b69f 100644 --- a/plugins/modules/elasticache.py +++ b/plugins/modules/elasticache.py @@ -143,16 +143,29 @@ from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -class ElastiCacheManager(): +class ElastiCacheManager: """Handles elasticache creation and destruction""" - EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying'] - - def __init__(self, module, name, engine, cache_engine_version, node_type, - num_nodes, cache_port, cache_parameter_group, cache_subnet_group, - cache_security_groups, security_group_ids, zone, wait, - hard_modify): + EXIST_STATUSES = ["available", "creating", "rebooting", "modifying"] + + def __init__( + self, + module, + name, + engine, + cache_engine_version, + node_type, + num_nodes, + cache_port, + cache_parameter_group, + cache_subnet_group, + cache_security_groups, + security_group_ids, + zone, + wait, + hard_modify, + ): self.module = module self.name = name self.engine = engine.lower() @@ -170,7 +183,7 @@ def __init__(self, module, name, engine, cache_engine_version, node_type, self.changed = False self.data = None - self.status = 'gone' + self.status = "gone" self.conn = self._get_elasticache_connection() self._refresh_data() @@ -195,32 +208,34 @@ def exists(self): def create(self): """Create an ElastiCache cluster""" - if self.status == 'available': + if self.status == "available": return - if self.status in ['creating', 'rebooting', 'modifying']: + if self.status in ["creating", "rebooting", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") return - if self.status == 'deleting': + if self.status == "deleting": if self.wait: - self._wait_for_status('gone') + self._wait_for_status("gone") else: msg = "'%s' is currently deleting. Cannot create." self.module.fail_json(msg=msg % self.name) - kwargs = dict(CacheClusterId=self.name, - NumCacheNodes=self.num_nodes, - CacheNodeType=self.node_type, - Engine=self.engine, - EngineVersion=self.cache_engine_version, - CacheSecurityGroupNames=self.cache_security_groups, - SecurityGroupIds=self.security_group_ids, - CacheParameterGroupName=self.cache_parameter_group, - CacheSubnetGroupName=self.cache_subnet_group) + kwargs = dict( + CacheClusterId=self.name, + NumCacheNodes=self.num_nodes, + CacheNodeType=self.node_type, + Engine=self.engine, + EngineVersion=self.cache_engine_version, + CacheSecurityGroupNames=self.cache_security_groups, + SecurityGroupIds=self.security_group_ids, + CacheParameterGroupName=self.cache_parameter_group, + CacheSubnetGroupName=self.cache_subnet_group, + ) if self.cache_port is not None: - kwargs['Port'] = self.cache_port + kwargs["Port"] = self.cache_port if self.zone is not None: - kwargs['PreferredAvailabilityZone'] = self.zone + kwargs["PreferredAvailabilityZone"] = self.zone try: self.conn.create_cache_cluster(**kwargs) @@ -232,20 +247,20 @@ def create(self): self.changed = True if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") return True def delete(self): """Destroy an ElastiCache cluster""" - if self.status == 'gone': + if self.status == "gone": return - if self.status == 'deleting': + if self.status == "deleting": if self.wait: - self._wait_for_status('gone') + self._wait_for_status("gone") return - if self.status in ['creating', 'rebooting', 'modifying']: + if self.status in ["creating", "rebooting", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") else: msg = "'%s' is currently %s. Cannot delete." self.module.fail_json(msg=msg % (self.name, self.status)) @@ -255,12 +270,12 @@ def delete(self): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Failed to delete cache cluster") - cache_cluster_data = response['CacheCluster'] + cache_cluster_data = response["CacheCluster"] self._refresh_data(cache_cluster_data) self.changed = True if self.wait: - self._wait_for_status('gone') + self._wait_for_status("gone") def sync(self): """Sync settings to cluster if required""" @@ -268,9 +283,9 @@ def sync(self): msg = "'%s' is %s. Cannot sync." self.module.fail_json(msg=msg % (self.name, self.status)) - if self.status in ['creating', 'rebooting', 'modifying']: + if self.status in ["creating", "rebooting", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") else: # Cluster can only be synced if available. If we can't wait # for this, then just be done. @@ -294,14 +309,16 @@ def modify(self): """Modify the cache cluster. Note it's only possible to modify a few select options.""" nodes_to_remove = self._get_nodes_to_remove() try: - self.conn.modify_cache_cluster(CacheClusterId=self.name, - NumCacheNodes=self.num_nodes, - CacheNodeIdsToRemove=nodes_to_remove, - CacheSecurityGroupNames=self.cache_security_groups, - CacheParameterGroupName=self.cache_parameter_group, - SecurityGroupIds=self.security_group_ids, - ApplyImmediately=True, - EngineVersion=self.cache_engine_version) + self.conn.modify_cache_cluster( + CacheClusterId=self.name, + NumCacheNodes=self.num_nodes, + CacheNodeIdsToRemove=nodes_to_remove, + CacheSecurityGroupNames=self.cache_security_groups, + CacheParameterGroupName=self.cache_parameter_group, + SecurityGroupIds=self.security_group_ids, + ApplyImmediately=True, + EngineVersion=self.cache_engine_version, + ) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Failed to modify cache cluster") @@ -309,27 +326,26 @@ def modify(self): self.changed = True if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") def reboot(self): """Reboot the cache cluster""" if not self.exists(): msg = "'%s' is %s. Cannot reboot." self.module.fail_json(msg=msg % (self.name, self.status)) - if self.status == 'rebooting': + if self.status == "rebooting": return - if self.status in ['creating', 'modifying']: + if self.status in ["creating", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") else: msg = "'%s' is currently %s. Cannot reboot." self.module.fail_json(msg=msg % (self.name, self.status)) # Collect ALL nodes for reboot - cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] + cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] try: - self.conn.reboot_cache_cluster(CacheClusterId=self.name, - CacheNodeIdsToReboot=cache_node_ids) + self.conn.reboot_cache_cluster(CacheClusterId=self.name, CacheNodeIdsToReboot=cache_node_ids) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Failed to reboot cache cluster") @@ -337,26 +353,18 @@ def reboot(self): self.changed = True if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") def get_info(self): """Return basic info about the cache cluster""" - info = { - 'name': self.name, - 'status': self.status - } + info = {"name": self.name, "status": self.status} if self.data: - info['data'] = self.data + info["data"] = self.data return info def _wait_for_status(self, awaited_status): """Wait for status to change from present status to awaited_status""" - status_map = { - 'creating': 'available', - 'rebooting': 'available', - 'modifying': 'available', - 'deleting': 'gone' - } + status_map = {"creating": "available", "rebooting": "available", "modifying": "available", "deleting": "gone"} if self.status == awaited_status: # No need to wait, we're already done return @@ -377,27 +385,24 @@ def _wait_for_status(self, awaited_status): def _requires_modification(self): """Check if cluster requires (nondestructive) modification""" # Check modifiable data attributes - modifiable_data = { - 'NumCacheNodes': self.num_nodes, - 'EngineVersion': self.cache_engine_version - } + modifiable_data = {"NumCacheNodes": self.num_nodes, "EngineVersion": self.cache_engine_version} for key, value in modifiable_data.items(): if value is not None and value and self.data[key] != value: return True # Check cache security groups cache_security_groups = [] - for sg in self.data['CacheSecurityGroups']: - cache_security_groups.append(sg['CacheSecurityGroupName']) + for sg in self.data["CacheSecurityGroups"]: + cache_security_groups.append(sg["CacheSecurityGroupName"]) if set(cache_security_groups) != set(self.cache_security_groups): return True # check vpc security groups if self.security_group_ids: vpc_security_groups = [] - security_groups = self.data.get('SecurityGroups', []) + security_groups = self.data.get("SecurityGroups", []) for sg in security_groups: - vpc_security_groups.append(sg['SecurityGroupId']) + vpc_security_groups.append(sg["SecurityGroupId"]) if set(vpc_security_groups) != set(self.security_group_ids): return True @@ -408,13 +413,13 @@ def _requires_destroy_and_create(self): Check whether a destroy and create is required to synchronize cluster. """ unmodifiable_data = { - 'node_type': self.data['CacheNodeType'], - 'engine': self.data['Engine'], - 'cache_port': self._get_port() + "node_type": self.data["CacheNodeType"], + "engine": self.data["Engine"], + "cache_port": self._get_port(), } # Only check for modifications if zone is specified if self.zone is not None: - unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone'] + unmodifiable_data["zone"] = self.data["PreferredAvailabilityZone"] for key, value in unmodifiable_data.items(): if getattr(self, key) is not None and getattr(self, key) != value: return True @@ -423,18 +428,18 @@ def _requires_destroy_and_create(self): def _get_elasticache_connection(self): """Get an elasticache connection""" try: - return self.module.client('elasticache') + return self.module.client("elasticache") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Failed to connect to AWS') + self.module.fail_json_aws(e, msg="Failed to connect to AWS") def _get_port(self): """Get the port. Where this information is retrieved from is engine dependent.""" - if self.data['Engine'] == 'memcached': - return self.data['ConfigurationEndpoint']['Port'] - elif self.data['Engine'] == 'redis': + if self.data["Engine"] == "memcached": + return self.data["ConfigurationEndpoint"]["Port"] + elif self.data["Engine"] == "redis": # Redis only supports a single node (presently) so just use # the first and only - return self.data['CacheNodes'][0]['Endpoint']['Port'] + return self.data["CacheNodes"][0]["Endpoint"]["Port"] def _refresh_data(self, cache_cluster_data=None): """Refresh data about this cache cluster""" @@ -442,25 +447,25 @@ def _refresh_data(self, cache_cluster_data=None): if cache_cluster_data is None: try: response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True) - except is_boto3_error_code('CacheClusterNotFound'): + except is_boto3_error_code("CacheClusterNotFound"): self.data = None - self.status = 'gone' + self.status = "gone" return except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Failed to describe cache clusters") - cache_cluster_data = response['CacheClusters'][0] + cache_cluster_data = response["CacheClusters"][0] self.data = cache_cluster_data - self.status = self.data['CacheClusterStatus'] + self.status = self.data["CacheClusterStatus"] # The documentation for elasticache lies -- status on rebooting is set # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it # here to make status checks etc. more sane. - if self.status == 'rebooting cache cluster nodes': - self.status = 'rebooting' + if self.status == "rebooting cache cluster nodes": + self.status = "rebooting" def _get_nodes_to_remove(self): """If there are nodes to remove, it figures out which need to be removed""" - num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes + num_nodes_to_remove = self.data["NumCacheNodes"] - self.num_nodes if num_nodes_to_remove <= 0: return [] @@ -468,76 +473,83 @@ def _get_nodes_to_remove(self): msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed." self.module.fail_json(msg=msg % self.name) - cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] + cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] return cache_node_ids[-num_nodes_to_remove:] def main(): - """ elasticache ansible module """ + """elasticache ansible module""" argument_spec = dict( - state=dict(required=True, choices=['present', 'absent', 'rebooted']), + state=dict(required=True, choices=["present", "absent", "rebooted"]), name=dict(required=True), - engine=dict(default='memcached'), + engine=dict(default="memcached"), cache_engine_version=dict(default=""), - node_type=dict(default='cache.t2.small'), - num_nodes=dict(default=1, type='int'), + node_type=dict(default="cache.t2.small"), + num_nodes=dict(default=1, type="int"), # alias for compat with the original PR 1950 - cache_parameter_group=dict(default="", aliases=['parameter_group']), - cache_port=dict(type='int'), + cache_parameter_group=dict(default="", aliases=["parameter_group"]), + cache_port=dict(type="int"), cache_subnet_group=dict(default=""), - cache_security_groups=dict(default=[], type='list', elements='str'), - security_group_ids=dict(default=[], type='list', elements='str'), + cache_security_groups=dict(default=[], type="list", elements="str"), + security_group_ids=dict(default=[], type="list", elements="str"), zone=dict(), - wait=dict(default=True, type='bool'), - hard_modify=dict(type='bool'), + wait=dict(default=True, type="bool"), + hard_modify=dict(type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, ) - name = module.params['name'] - state = module.params['state'] - engine = module.params['engine'] - cache_engine_version = module.params['cache_engine_version'] - node_type = module.params['node_type'] - num_nodes = module.params['num_nodes'] - cache_port = module.params['cache_port'] - cache_subnet_group = module.params['cache_subnet_group'] - cache_security_groups = module.params['cache_security_groups'] - security_group_ids = module.params['security_group_ids'] - zone = module.params['zone'] - wait = module.params['wait'] - hard_modify = module.params['hard_modify'] - cache_parameter_group = module.params['cache_parameter_group'] + name = module.params["name"] + state = module.params["state"] + engine = module.params["engine"] + cache_engine_version = module.params["cache_engine_version"] + node_type = module.params["node_type"] + num_nodes = module.params["num_nodes"] + cache_port = module.params["cache_port"] + cache_subnet_group = module.params["cache_subnet_group"] + cache_security_groups = module.params["cache_security_groups"] + security_group_ids = module.params["security_group_ids"] + zone = module.params["zone"] + wait = module.params["wait"] + hard_modify = module.params["hard_modify"] + cache_parameter_group = module.params["cache_parameter_group"] if cache_subnet_group and cache_security_groups: module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups") - if state == 'present' and not num_nodes: + if state == "present" and not num_nodes: module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0") - elasticache_manager = ElastiCacheManager(module, name, engine, - cache_engine_version, node_type, - num_nodes, cache_port, - cache_parameter_group, - cache_subnet_group, - cache_security_groups, - security_group_ids, zone, wait, - hard_modify) + elasticache_manager = ElastiCacheManager( + module, + name, + engine, + cache_engine_version, + node_type, + num_nodes, + cache_port, + cache_parameter_group, + cache_subnet_group, + cache_security_groups, + security_group_ids, + zone, + wait, + hard_modify, + ) - if state == 'present': + if state == "present": elasticache_manager.ensure_present() - elif state == 'absent': + elif state == "absent": elasticache_manager.ensure_absent() - elif state == 'rebooted': + elif state == "rebooted": elasticache_manager.ensure_rebooted() - facts_result = dict(changed=elasticache_manager.changed, - elasticache=elasticache_manager.get_info()) + facts_result = dict(changed=elasticache_manager.changed, elasticache=elasticache_manager.get_info()) module.exit_json(**facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elasticache_info.py b/plugins/modules/elasticache_info.py index 31283cd18aa..28b31f76a7f 100644 --- a/plugins/modules/elasticache_info.py +++ b/plugins/modules/elasticache_info.py @@ -418,40 +418,40 @@ @AWSRetry.exponential_backoff() def describe_cache_clusters_with_backoff(client, cluster_id=None): - paginator = client.get_paginator('describe_cache_clusters') + paginator = client.get_paginator("describe_cache_clusters") params = dict(ShowCacheNodeInfo=True) if cluster_id: - params['CacheClusterId'] = cluster_id + params["CacheClusterId"] = cluster_id try: response = paginator.paginate(**params).build_full_result() - except is_boto3_error_code('CacheClusterNotFound'): + except is_boto3_error_code("CacheClusterNotFound"): return [] - return response['CacheClusters'] + return response["CacheClusters"] @AWSRetry.exponential_backoff() def describe_replication_group_with_backoff(client, replication_group_id): try: response = client.describe_replication_groups(ReplicationGroupId=replication_group_id) - except is_boto3_error_code('ReplicationGroupNotFoundFault'): + except is_boto3_error_code("ReplicationGroupNotFoundFault"): return None - return response['ReplicationGroups'][0] + return response["ReplicationGroups"][0] @AWSRetry.exponential_backoff() def get_elasticache_tags_with_backoff(client, cluster_id): - return client.list_tags_for_resource(ResourceName=cluster_id)['TagList'] + return client.list_tags_for_resource(ResourceName=cluster_id)["TagList"] def get_aws_account_id(module): try: - client = module.client('sts') + client = module.client("sts") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Can't authorize connection") try: - return client.get_caller_identity()['Account'] + return client.get_caller_identity()["Account"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain AWS account id") @@ -459,16 +459,15 @@ def get_aws_account_id(module): def get_elasticache_clusters(client, module): region = module.region try: - clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name')) + clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get("name")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain cache cluster info") account_id = get_aws_account_id(module) results = [] for cluster in clusters: - cluster = camel_dict_to_snake_dict(cluster) - arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id']) + arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster["cache_cluster_id"]) try: tags = get_elasticache_tags_with_backoff(client, arn) except is_boto3_error_code("CacheClusterNotFound"): @@ -477,17 +476,17 @@ def get_elasticache_clusters(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get tags for cluster %s") - cluster['tags'] = boto3_tag_list_to_ansible_dict(tags) + cluster["tags"] = boto3_tag_list_to_ansible_dict(tags) - if cluster.get('replication_group_id', None): + if cluster.get("replication_group_id", None): try: - replication_group = describe_replication_group_with_backoff(client, cluster['replication_group_id']) + replication_group = describe_replication_group_with_backoff(client, cluster["replication_group_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain replication group info") if replication_group is not None: replication_group = camel_dict_to_snake_dict(replication_group) - cluster['replication_group'] = replication_group + cluster["replication_group"] = replication_group results.append(cluster) return results @@ -499,10 +498,10 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('elasticache') + client = module.client("elasticache") module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elasticache_parameter_group.py b/plugins/modules/elasticache_parameter_group.py index 04a789bd59f..1e5a1c63b6f 100644 --- a/plugins/modules/elasticache_parameter_group.py +++ b/plugins/modules/elasticache_parameter_group.py @@ -119,9 +119,11 @@ def create(module, conn, name, group_family, description): - """ Create ElastiCache parameter group. """ + """Create ElastiCache parameter group.""" try: - response = conn.create_cache_parameter_group(CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description) + response = conn.create_cache_parameter_group( + CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create cache parameter group.") @@ -129,7 +131,7 @@ def create(module, conn, name, group_family, description): def delete(module, conn, name): - """ Delete ElastiCache parameter group. """ + """Delete ElastiCache parameter group.""" try: conn.delete_cache_parameter_group(CacheParameterGroupName=name) response = {} @@ -140,7 +142,7 @@ def delete(module, conn, name): def make_current_modifiable_param_dict(module, conn, name): - """ Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}""" + """Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}""" current_info = get_info(conn, name) if current_info is False: module.fail_json(msg="Could not connect to the cache parameter group %s." % name) @@ -157,7 +159,7 @@ def make_current_modifiable_param_dict(module, conn, name): def check_valid_modification(module, values, modifiable_params): - """ Check if the parameters and values in values are valid. """ + """Check if the parameters and values in values are valid.""" changed_with_update = False for parameter in values: @@ -165,7 +167,10 @@ def check_valid_modification(module, values, modifiable_params): # check valid modifiable parameters if parameter not in modifiable_params: - module.fail_json(msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." % (parameter, modifiable_params.keys())) + module.fail_json( + msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." + % (parameter, modifiable_params.keys()) + ) # check allowed datatype for modified parameters str_to_type = {"integer": int, "string": string_types} @@ -180,18 +185,24 @@ def check_valid_modification(module, values, modifiable_params): if isinstance(new_value, bool): values[parameter] = 1 if new_value else 0 else: - module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." % - (new_value, type(new_value), parameter, modifiable_params[parameter][1])) + module.fail_json( + msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." + % (new_value, type(new_value), parameter, modifiable_params[parameter][1]) + ) else: - module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." % - (new_value, type(new_value), parameter, modifiable_params[parameter][1])) + module.fail_json( + msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." + % (new_value, type(new_value), parameter, modifiable_params[parameter][1]) + ) # check allowed values for modifiable parameters choices = modifiable_params[parameter][0] if choices: if not (to_text(new_value) in choices or isinstance(new_value, int)): - module.fail_json(msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." % - (new_value, parameter, choices)) + module.fail_json( + msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." + % (new_value, parameter, choices) + ) # check if a new value is different from current value if to_text(values[parameter]) != modifiable_params[parameter][2]: @@ -201,7 +212,7 @@ def check_valid_modification(module, values, modifiable_params): def check_changed_parameter_values(values, old_parameters, new_parameters): - """ Checking if the new values are different than the old values. """ + """Checking if the new values are different than the old values.""" changed_with_update = False # if the user specified parameters to reset, only check those for change @@ -221,21 +232,23 @@ def check_changed_parameter_values(values, old_parameters, new_parameters): def modify(module, conn, name, values): - """ Modify ElastiCache parameter group to reflect the new information if it differs from the current. """ + """Modify ElastiCache parameter group to reflect the new information if it differs from the current.""" # compares current group parameters with the parameters we've specified to to a value to see if this will change the group format_parameters = [] for key in values: value = to_text(values[key]) - format_parameters.append({'ParameterName': key, 'ParameterValue': value}) + format_parameters.append({"ParameterName": key, "ParameterValue": value}) try: - response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters) + response = conn.modify_cache_parameter_group( + CacheParameterGroupName=name, ParameterNameValues=format_parameters + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to modify cache parameter group.") return response def reset(module, conn, name, values): - """ Reset ElastiCache parameter group if the current information is different from the new information. """ + """Reset ElastiCache parameter group if the current information is different from the new information.""" # used to compare with the reset parameters' dict to see if there have been changes old_parameters_dict = make_current_modifiable_param_dict(module, conn, name) @@ -247,12 +260,14 @@ def reset(module, conn, name, values): format_parameters = [] for key in values: value = to_text(values[key]) - format_parameters.append({'ParameterName': key, 'ParameterValue': value}) + format_parameters.append({"ParameterName": key, "ParameterValue": value}) else: all_parameters = True try: - response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters) + response = conn.reset_cache_parameter_group( + CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to reset cache parameter group.") @@ -264,7 +279,7 @@ def reset(module, conn, name, values): def get_info(conn, name): - """ Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access. """ + """Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access.""" try: data = conn.describe_cache_parameters(CacheParameterGroupName=name) return data @@ -274,36 +289,50 @@ def get_info(conn, name): def main(): argument_spec = dict( - group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0', 'redis6.x']), - name=dict(required=True, type='str'), - description=dict(default='', type='str'), - state=dict(required=True, choices=['present', 'absent', 'reset']), - values=dict(type='dict'), + group_family=dict( + type="str", + choices=[ + "memcached1.4", + "memcached1.5", + "redis2.6", + "redis2.8", + "redis3.2", + "redis4.0", + "redis5.0", + "redis6.x", + ], + ), + name=dict(required=True, type="str"), + description=dict(default="", type="str"), + state=dict(required=True, choices=["present", "absent", "reset"]), + values=dict(type="dict"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - parameter_group_family = module.params.get('group_family') - parameter_group_name = module.params.get('name') - group_description = module.params.get('description') - state = module.params.get('state') - values = module.params.get('values') + parameter_group_family = module.params.get("group_family") + parameter_group_name = module.params.get("name") + group_description = module.params.get("description") + state = module.params.get("state") + values = module.params.get("values") try: - connection = module.client('elasticache') + connection = module.client("elasticache") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") exists = get_info(connection, parameter_group_name) # check that the needed requirements are available - if state == 'present' and not (exists or parameter_group_family): + if state == "present" and not (exists or parameter_group_family): module.fail_json(msg="Creating a group requires a family group.") - elif state == 'reset' and not exists: - module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name) + elif state == "reset" and not exists: + module.fail_json( + msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name + ) # Taking action changed = False - if state == 'present': + if state == "present": if exists: # confirm that the group exists without any actions if not values: @@ -316,19 +345,21 @@ def main(): response = modify(module, connection, parameter_group_name, values) # create group else: - response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description) + response, changed = create( + module, connection, parameter_group_name, parameter_group_family, group_description + ) if values: modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name) changed, values = check_valid_modification(module, values, modifiable_params) response = modify(module, connection, parameter_group_name, values) - elif state == 'absent': + elif state == "absent": if exists: # delete group response, changed = delete(module, connection, parameter_group_name) else: response = {} changed = False - elif state == 'reset': + elif state == "reset": response, changed = reset(module, connection, parameter_group_name, values) facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response)) @@ -336,5 +367,5 @@ def main(): module.exit_json(**facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elasticache_snapshot.py b/plugins/modules/elasticache_snapshot.py index e477fc86aa5..66c9cb9da57 100644 --- a/plugins/modules/elasticache_snapshot.py +++ b/plugins/modules/elasticache_snapshot.py @@ -122,13 +122,13 @@ def create(module, connection, replication_id, cluster_id, name): - """ Create an ElastiCache backup. """ + """Create an ElastiCache backup.""" try: - response = connection.create_snapshot(ReplicationGroupId=replication_id, - CacheClusterId=cluster_id, - SnapshotName=name) + response = connection.create_snapshot( + ReplicationGroupId=replication_id, CacheClusterId=cluster_id, SnapshotName=name + ) changed = True - except is_boto3_error_code('SnapshotAlreadyExistsFault'): + except is_boto3_error_code("SnapshotAlreadyExistsFault"): response = {} changed = False except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except @@ -137,11 +137,9 @@ def create(module, connection, replication_id, cluster_id, name): def copy(module, connection, name, target, bucket): - """ Copy an ElastiCache backup. """ + """Copy an ElastiCache backup.""" try: - response = connection.copy_snapshot(SourceSnapshotName=name, - TargetSnapshotName=target, - TargetBucket=bucket) + response = connection.copy_snapshot(SourceSnapshotName=name, TargetSnapshotName=target, TargetBucket=bucket) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to copy the snapshot.") @@ -149,16 +147,18 @@ def copy(module, connection, name, target, bucket): def delete(module, connection, name): - """ Delete an ElastiCache backup. """ + """Delete an ElastiCache backup.""" try: response = connection.delete_snapshot(SnapshotName=name) changed = True - except is_boto3_error_code('SnapshotNotFoundFault'): + except is_boto3_error_code("SnapshotNotFoundFault"): response = {} changed = False - except is_boto3_error_code('InvalidSnapshotState'): # pylint: disable=duplicate-except - module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion." - "You may need to wait a few minutes.") + except is_boto3_error_code("InvalidSnapshotState"): # pylint: disable=duplicate-except + module.fail_json( + msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion." + "You may need to wait a few minutes." + ) except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete the snapshot.") return response, changed @@ -166,38 +166,38 @@ def delete(module, connection, name): def main(): argument_spec = dict( - name=dict(required=True, type='str'), - state=dict(required=True, type='str', choices=['present', 'absent', 'copy']), - replication_id=dict(type='str'), - cluster_id=dict(type='str'), - target=dict(type='str'), - bucket=dict(type='str'), + name=dict(required=True, type="str"), + state=dict(required=True, type="str", choices=["present", "absent", "copy"]), + replication_id=dict(type="str"), + cluster_id=dict(type="str"), + target=dict(type="str"), + bucket=dict(type="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - name = module.params.get('name') - state = module.params.get('state') - replication_id = module.params.get('replication_id') - cluster_id = module.params.get('cluster_id') - target = module.params.get('target') - bucket = module.params.get('bucket') + name = module.params.get("name") + state = module.params.get("state") + replication_id = module.params.get("replication_id") + cluster_id = module.params.get("cluster_id") + target = module.params.get("target") + bucket = module.params.get("bucket") try: - connection = module.client('elasticache') + connection = module.client("elasticache") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") changed = False response = {} - if state == 'present': + if state == "present": if not all((replication_id, cluster_id)): module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'") response, changed = create(module, connection, replication_id, cluster_id, name) - elif state == 'absent': + elif state == "absent": response, changed = delete(module, connection, name) - elif state == 'copy': + elif state == "copy": if not all((target, bucket)): module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.") response, changed = copy(module, connection, name, target, bucket) @@ -207,5 +207,5 @@ def main(): module.exit_json(**facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elasticache_subnet_group.py b/plugins/modules/elasticache_subnet_group.py index 6353c72837b..f7740e696fb 100644 --- a/plugins/modules/elasticache_subnet_group.py +++ b/plugins/modules/elasticache_subnet_group.py @@ -113,10 +113,13 @@ def get_subnet_group(name): groups = client.describe_cache_subnet_groups( aws_retry=True, CacheSubnetGroupName=name, - )['CacheSubnetGroups'] - except is_boto3_error_code('CacheSubnetGroupNotFoundFault'): + )["CacheSubnetGroups"] + except is_boto3_error_code("CacheSubnetGroupNotFoundFault"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe subnet group") if not groups: @@ -130,19 +133,18 @@ def get_subnet_group(name): subnet_group = camel_dict_to_snake_dict(groups[0]) - subnet_group['name'] = subnet_group['cache_subnet_group_name'] - subnet_group['description'] = subnet_group['cache_subnet_group_description'] + subnet_group["name"] = subnet_group["cache_subnet_group_name"] + subnet_group["description"] = subnet_group["cache_subnet_group_description"] - subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets']) - subnet_group['subnet_ids'] = subnet_ids + subnet_ids = list(s["subnet_identifier"] for s in subnet_group["subnets"]) + subnet_group["subnet_ids"] = subnet_ids return subnet_group def create_subnet_group(name, description, subnets): - if not subnets: - module.fail_json(msg='At least one subnet must be provided when creating a subnet group') + module.fail_json(msg="At least one subnet must be provided when creating a subnet group") if module.check_mode: return True @@ -163,13 +165,13 @@ def create_subnet_group(name, description, subnets): def update_subnet_group(subnet_group, name, description, subnets): update_params = dict() - if description and subnet_group['description'] != description: - update_params['CacheSubnetGroupDescription'] = description + if description and subnet_group["description"] != description: + update_params["CacheSubnetGroupDescription"] = description if subnets: - old_subnets = set(subnet_group['subnet_ids']) + old_subnets = set(subnet_group["subnet_ids"]) new_subnets = set(subnets) if old_subnets != new_subnets: - update_params['SubnetIds'] = list(subnets) + update_params["SubnetIds"] = list(subnets) if not update_params: return False @@ -190,7 +192,6 @@ def update_subnet_group(subnet_group, name, description, subnets): def delete_subnet_group(name): - if module.check_mode: return True @@ -200,20 +201,23 @@ def delete_subnet_group(name): CacheSubnetGroupName=name, ) return True - except is_boto3_error_code('CacheSubnetGroupNotFoundFault'): + except is_boto3_error_code("CacheSubnetGroupNotFoundFault"): # AWS is "eventually consistent", cope with the race conditions where # deletion hadn't completed when we ran describe return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete subnet group") def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), name=dict(required=True), description=dict(required=False), - subnets=dict(required=False, type='list', elements='str'), + subnets=dict(required=False, type="list", elements="str"), ) global module @@ -224,17 +228,17 @@ def main(): supports_check_mode=True, ) - state = module.params.get('state') - name = module.params.get('name').lower() - description = module.params.get('description') - subnets = module.params.get('subnets') + state = module.params.get("state") + name = module.params.get("name").lower() + description = module.params.get("description") + subnets = module.params.get("subnets") - client = module.client('elasticache', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("elasticache", retry_decorator=AWSRetry.jittered_backoff()) subnet_group = get_subnet_group(name) changed = False - if state == 'present': + if state == "present": if not subnet_group: result = create_subnet_group(name, description, subnets) changed |= result @@ -251,5 +255,5 @@ def main(): module.exit_json(changed=changed, cache_subnet_group=subnet_group) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elasticbeanstalk_app.py b/plugins/modules/elasticbeanstalk_app.py index c6333379456..bf11afbb2bf 100644 --- a/plugins/modules/elasticbeanstalk_app.py +++ b/plugins/modules/elasticbeanstalk_app.py @@ -112,24 +112,24 @@ def list_apps(ebs, app_name, module): def check_app(ebs, app, module): - app_name = module.params['app_name'] - description = module.params['description'] - state = module.params['state'] - terminate_by_force = module.params['terminate_by_force'] + app_name = module.params["app_name"] + description = module.params["description"] + state = module.params["state"] + terminate_by_force = module.params["terminate_by_force"] result = {} - if state == 'present' and app is None: + if state == "present" and app is None: result = dict(changed=True, output="App would be created") - elif state == 'present' and app.get("Description", None) != description: + elif state == "present" and app.get("Description", None) != description: result = dict(changed=True, output="App would be updated", app=app) - elif state == 'present' and app.get("Description", None) == description: + elif state == "present" and app.get("Description", None) == description: result = dict(changed=False, output="App is up-to-date", app=app) - elif state == 'absent' and app is None: + elif state == "absent" and app is None: result = dict(changed=False, output="App does not exist", app={}) - elif state == 'absent' and app is not None: + elif state == "absent" and app is not None: result = dict(changed=True, output="App will be deleted", app=app) - elif state == 'absent' and app is not None and terminate_by_force is True: + elif state == "absent" and app is not None and terminate_by_force is True: result = dict(changed=True, output="Running environments terminated before the App will be deleted", app=app) module.exit_json(**result) @@ -145,37 +145,36 @@ def filter_empty(**kwargs): def main(): argument_spec = dict( - app_name=dict(aliases=['name'], type='str', required=False), + app_name=dict(aliases=["name"], type="str", required=False), description=dict(), - state=dict(choices=['present', 'absent'], default='present'), - terminate_by_force=dict(type='bool', default=False, required=False) + state=dict(choices=["present", "absent"], default="present"), + terminate_by_force=dict(type="bool", default=False, required=False), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - app_name = module.params['app_name'] - description = module.params['description'] - state = module.params['state'] - terminate_by_force = module.params['terminate_by_force'] + app_name = module.params["app_name"] + description = module.params["description"] + state = module.params["state"] + terminate_by_force = module.params["terminate_by_force"] if app_name is None: module.fail_json(msg='Module parameter "app_name" is required') result = {} - ebs = module.client('elasticbeanstalk') + ebs = module.client("elasticbeanstalk") app = describe_app(ebs, app_name, module) if module.check_mode: check_app(ebs, app, module) - module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.') + module.fail_json(msg="ASSERTION FAILURE: check_app() should not return control.") - if state == 'present': + if state == "present": if app is None: try: - create_app = ebs.create_application(**filter_empty(ApplicationName=app_name, - Description=description)) + create_app = ebs.create_application(**filter_empty(ApplicationName=app_name, Description=description)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Could not create application") @@ -200,7 +199,7 @@ def main(): else: if app is None: - result = dict(changed=False, output='Application not found', app={}) + result = dict(changed=False, output="Application not found", app={}) else: try: if terminate_by_force: @@ -209,9 +208,12 @@ def main(): else: ebs.delete_application(ApplicationName=app_name) changed = True - except is_boto3_error_message('It is currently pending deletion'): + except is_boto3_error_message("It is currently pending deletion"): changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Cannot terminate app") result = dict(changed=changed, app=app) @@ -219,5 +221,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elb_classic_lb_info.py b/plugins/modules/elb_classic_lb_info.py index 3d3d43d4e71..db3fd46ac48 100644 --- a/plugins/modules/elb_classic_lb_info.py +++ b/plugins/modules/elb_classic_lb_info.py @@ -161,63 +161,79 @@ def list_elbs(connection, load_balancer_names): def describe_elb(connection, lb): description = camel_dict_to_snake_dict(lb) - name = lb['LoadBalancerName'] - instances = lb.get('Instances', []) - description['tags'] = get_tags(connection, name) - description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService') - description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService') - description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown') - description['attributes'] = get_lb_attributes(connection, name) + name = lb["LoadBalancerName"] + instances = lb.get("Instances", []) + description["tags"] = get_tags(connection, name) + description["instances_inservice"], description["instances_inservice_count"] = lb_instance_health( + connection, name, instances, "InService" + ) + description["instances_outofservice"], description["instances_outofservice_count"] = lb_instance_health( + connection, name, instances, "OutOfService" + ) + description["instances_unknownservice"], description["instances_unknownservice_count"] = lb_instance_health( + connection, name, instances, "Unknown" + ) + description["attributes"] = get_lb_attributes(connection, name) return description @AWSRetry.jittered_backoff() def get_all_lb(connection): - paginator = connection.get_paginator('describe_load_balancers') - return paginator.paginate().build_full_result()['LoadBalancerDescriptions'] + paginator = connection.get_paginator("describe_load_balancers") + return paginator.paginate().build_full_result()["LoadBalancerDescriptions"] def get_lb(connection, load_balancer_name): try: - return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])['LoadBalancerDescriptions'][0] - except is_boto3_error_code('LoadBalancerNotFound'): + return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])[ + "LoadBalancerDescriptions" + ][0] + except is_boto3_error_code("LoadBalancerNotFound"): return [] def get_lb_attributes(connection, load_balancer_name): - attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get('LoadBalancerAttributes', {}) + attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get( + "LoadBalancerAttributes", {} + ) return camel_dict_to_snake_dict(attributes) def get_tags(connection, load_balancer_name): - tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])['TagDescriptions'] + tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])["TagDescriptions"] if not tags: return {} - return boto3_tag_list_to_ansible_dict(tags[0]['Tags']) + return boto3_tag_list_to_ansible_dict(tags[0]["Tags"]) def lb_instance_health(connection, load_balancer_name, instances, state): - instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get('InstanceStates', []) - instate = [instance['InstanceId'] for instance in instance_states if instance['State'] == state] + instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get( + "InstanceStates", [] + ) + instate = [instance["InstanceId"] for instance in instance_states if instance["State"] == state] return instate, len(instate) def main(): argument_spec = dict( - names=dict(default=[], type='list', elements='str') + names=dict(default=[], type="list", elements="str"), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - connection = module.client('elb', retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY)) + connection = module.client( + "elb", retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY) + ) try: - elbs = list_elbs(connection, module.params.get('names')) + elbs = list_elbs(connection, module.params.get("names")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get load balancer information.") module.exit_json(elbs=elbs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elb_instance.py b/plugins/modules/elb_instance.py index fe1128c9661..2d6ca291968 100644 --- a/plugins/modules/elb_instance.py +++ b/plugins/modules/elb_instance.py @@ -107,9 +107,9 @@ class ElbManager: def __init__(self, module, instance_id=None, ec2_elbs=None): retry_decorator = AWSRetry.jittered_backoff() self.module = module - self.client_asg = module.client('autoscaling', retry_decorator=retry_decorator) - self.client_ec2 = module.client('ec2', retry_decorator=retry_decorator) - self.client_elb = module.client('elb', retry_decorator=retry_decorator) + self.client_asg = module.client("autoscaling", retry_decorator=retry_decorator) + self.client_ec2 = module.client("ec2", retry_decorator=retry_decorator) + self.client_elb = module.client("elb", retry_decorator=retry_decorator) self.instance_id = instance_id self.lbs = self._get_instance_lbs(ec2_elbs) self.changed = False @@ -120,11 +120,11 @@ def deregister(self, wait, timeout): to report it out-of-service""" for lb in self.lbs: - instance_ids = [i['InstanceId'] for i in lb['Instances']] + instance_ids = [i["InstanceId"] for i in lb["Instances"]] if self.instance_id not in instance_ids: continue - self.updated_elbs.add(lb['LoadBalancerName']) + self.updated_elbs.add(lb["LoadBalancerName"]) if self.module.check_mode: self.changed = True @@ -133,12 +133,13 @@ def deregister(self, wait, timeout): try: self.client_elb.deregister_instances_from_load_balancer( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], Instances=[{"InstanceId": self.instance_id}], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to deregister instance from load balancer', - load_balancer=lb, instance=self.instance_id) + self.module.fail_json_aws( + e, "Failed to deregister instance from load balancer", load_balancer=lb, instance=self.instance_id + ) # The ELB is changing state in some way. Either an instance that's # InService is moving to OutOfService, or an instance that's @@ -147,17 +148,17 @@ def deregister(self, wait, timeout): if wait: for lb in self.lbs: - self._await_elb_instance_state(lb, 'Deregistered', timeout) + self._await_elb_instance_state(lb, "Deregistered", timeout) def register(self, wait, enable_availability_zone, timeout): """Register the instance for all ELBs and wait for the ELB to report the instance in-service""" for lb in self.lbs: - instance_ids = [i['InstanceId'] for i in lb['Instances']] + instance_ids = [i["InstanceId"] for i in lb["Instances"]] if self.instance_id in instance_ids: continue - self.updated_elbs.add(lb['LoadBalancerName']) + self.updated_elbs.add(lb["LoadBalancerName"]) if enable_availability_zone: self.changed |= self._enable_availailability_zone(lb) @@ -169,31 +170,32 @@ def register(self, wait, enable_availability_zone, timeout): try: self.client_elb.register_instances_with_load_balancer( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], Instances=[{"InstanceId": self.instance_id}], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to register instance with load balancer', - load_balancer=lb, instance=self.instance_id) + self.module.fail_json_aws( + e, "Failed to register instance with load balancer", load_balancer=lb, instance=self.instance_id + ) self.changed = True if wait: for lb in self.lbs: - self._await_elb_instance_state(lb, 'InService', timeout) + self._await_elb_instance_state(lb, "InService", timeout) @AWSRetry.jittered_backoff() def _describe_elbs(self, **params): - paginator = self.client_elb.get_paginator('describe_load_balancers') + paginator = self.client_elb.get_paginator("describe_load_balancers") results = paginator.paginate(**params).build_full_result() - return results['LoadBalancerDescriptions'] + return results["LoadBalancerDescriptions"] def exists(self, lbtest): - """ Verify that the named ELB actually exists """ + """Verify that the named ELB actually exists""" found = False for lb in self.lbs: - if lb['LoadBalancerName'] == lbtest: + if lb["LoadBalancerName"] == lbtest: found = True break return found @@ -203,9 +205,9 @@ def _enable_availailability_zone(self, lb): Returns True if the zone was enabled or False if no change was made. lb: load balancer""" instance = self._get_instance() - desired_zone = instance['Placement']['AvailabilityZone'] + desired_zone = instance["Placement"]["AvailabilityZone"] - if desired_zone in lb['AvailabilityZones']: + if desired_zone in lb["AvailabilityZones"]: return False if self.module.check_mode: @@ -214,12 +216,11 @@ def _enable_availailability_zone(self, lb): try: self.client_elb.enable_availability_zones_for_load_balancer( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], AvailabilityZones=[desired_zone], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to enable AZ on load balancers', - load_balancer=lb, zone=desired_zone) + self.module.fail_json_aws(e, "Failed to enable AZ on load balancers", load_balancer=lb, zone=desired_zone) return True @@ -233,27 +234,29 @@ def _await_elb_instance_state(self, lb, awaited_state, timeout): if awaited_state == initial_state: return - if awaited_state == 'InService': - waiter = self.client_elb.get_waiter('instance_in_service') - elif awaited_state == 'Deregistered': - waiter = self.client_elb.get_waiter('instance_deregistered') - elif awaited_state == 'OutOfService': - waiter = self.client_elb.get_waiter('instance_deregistered') + if awaited_state == "InService": + waiter = self.client_elb.get_waiter("instance_in_service") + elif awaited_state == "Deregistered": + waiter = self.client_elb.get_waiter("instance_deregistered") + elif awaited_state == "OutOfService": + waiter = self.client_elb.get_waiter("instance_deregistered") else: - self.module.fail_json(msg='Could not wait for unknown state', awaited_state=awaited_state) + self.module.fail_json(msg="Could not wait for unknown state", awaited_state=awaited_state) try: waiter.wait( - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], Instances=[{"InstanceId": self.instance_id}], - WaiterConfig={'Delay': 1, 'MaxAttempts': timeout}, + WaiterConfig={"Delay": 1, "MaxAttempts": timeout}, ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, msg='Timeout waiting for instance to reach desired state', - awaited_state=awaited_state) + self.module.fail_json_aws( + e, msg="Timeout waiting for instance to reach desired state", awaited_state=awaited_state + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Error while waiting for instance to reach desired state', - awaited_state=awaited_state) + self.module.fail_json_aws( + e, msg="Error while waiting for instance to reach desired state", awaited_state=awaited_state + ) return @@ -265,18 +268,21 @@ def _get_instance_health(self, lb): try: status = self.client_elb.describe_instance_health( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], - Instances=[{'InstanceId': self.instance_id}], - )['InstanceStates'] - except is_boto3_error_code('InvalidInstance'): + LoadBalancerName=lb["LoadBalancerName"], + Instances=[{"InstanceId": self.instance_id}], + )["InstanceStates"] + except is_boto3_error_code("InvalidInstance"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg='Failed to get instance health') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Failed to get instance health") if not status: return None - return status[0]['State'] + return status[0]["State"] def _get_instance_lbs(self, ec2_elbs=None): """Returns a list of ELBs attached to self.instance_id @@ -289,12 +295,12 @@ def _get_instance_lbs(self, ec2_elbs=None): ec2_elbs = self._get_auto_scaling_group_lbs() if ec2_elbs: - list_params['LoadBalancerNames'] = ec2_elbs + list_params["LoadBalancerNames"] = ec2_elbs try: elbs = self._describe_elbs(**list_params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to describe load balancers') + self.module.fail_json_aws(e, "Failed to describe load balancers") if ec2_elbs: return elbs @@ -303,7 +309,7 @@ def _get_instance_lbs(self, ec2_elbs=None): # of. lbs = [] for lb in elbs: - instance_ids = [i['InstanceId'] for i in lb['Instances']] + instance_ids = [i["InstanceId"] for i in lb["Instances"]] if self.instance_id in instance_ids: lbs.append(lb) @@ -311,14 +317,14 @@ def _get_instance_lbs(self, ec2_elbs=None): def _get_auto_scaling_group_lbs(self): """Returns a list of ELBs associated with self.instance_id - indirectly through its auto scaling group membership""" + indirectly through its auto scaling group membership""" try: asg_instances = self.client_asg.describe_auto_scaling_instances( - aws_retry=True, - InstanceIds=[self.instance_id])['AutoScalingInstances'] + aws_retry=True, InstanceIds=[self.instance_id] + )["AutoScalingInstances"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') + self.module.fail_json_aws(e, msg="Failed to describe ASG Instance") if len(asg_instances) > 1: self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.") @@ -327,42 +333,40 @@ def _get_auto_scaling_group_lbs(self): # Instance isn't a member of an ASG return [] - asg_name = asg_instances[0]['AutoScalingGroupName'] + asg_name = asg_instances[0]["AutoScalingGroupName"] try: asg_instances = self.client_asg.describe_auto_scaling_groups( - aws_retry=True, - AutoScalingGroupNames=[asg_name])['AutoScalingGroups'] + aws_retry=True, AutoScalingGroupNames=[asg_name] + )["AutoScalingGroups"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') + self.module.fail_json_aws(e, msg="Failed to describe ASG Instance") if len(asg_instances) != 1: self.module.fail_json(msg="Illegal state, expected one auto scaling group.") - return asg_instances[0]['LoadBalancerNames'] + return asg_instances[0]["LoadBalancerNames"] def _get_instance(self): """Returns the description of an instance""" try: - result = self.client_ec2.describe_instances( - aws_retry=True, - InstanceIds=[self.instance_id]) + result = self.client_ec2.describe_instances(aws_retry=True, InstanceIds=[self.instance_id]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') - return result['Reservations'][0]['Instances'][0] + self.module.fail_json_aws(e, msg="Failed to describe ASG Instance") + return result["Reservations"][0]["Instances"][0] def main(): argument_spec = dict( - state={'required': True, 'choices': ['present', 'absent']}, - instance_id={'required': True}, - ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, - wait={'required': False, 'default': True, 'type': 'bool'}, - wait_timeout={'required': False, 'default': 0, 'type': 'int'}, + state={"required": True, "choices": ["present", "absent"]}, + instance_id={"required": True}, + ec2_elbs={"default": None, "required": False, "type": "list", "elements": "str"}, + enable_availability_zone={"default": True, "required": False, "type": "bool"}, + wait={"required": False, "default": True, "type": "bool"}, + wait_timeout={"required": False, "default": 0, "type": "int"}, ) required_if = [ - ('state', 'present', ['ec2_elbs']), + ("state", "present", ["ec2_elbs"]), ] module = AnsibleAWSModule( @@ -371,11 +375,11 @@ def main(): supports_check_mode=True, ) - ec2_elbs = module.params['ec2_elbs'] - wait = module.params['wait'] - enable_availability_zone = module.params['enable_availability_zone'] - timeout = module.params['wait_timeout'] - instance_id = module.params['instance_id'] + ec2_elbs = module.params["ec2_elbs"] + wait = module.params["wait"] + enable_availability_zone = module.params["enable_availability_zone"] + timeout = module.params["wait_timeout"] + instance_id = module.params["instance_id"] elb_man = ElbManager(module, instance_id, ec2_elbs) @@ -384,9 +388,9 @@ def main(): if not elb_man.exists(elb): module.fail_json(msg="ELB {0} does not exist".format(elb)) - if module.params['state'] == 'present': + if module.params["state"] == "present": elb_man.register(wait, enable_availability_zone, timeout) - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": elb_man.deregister(wait, timeout) module.exit_json( @@ -395,5 +399,5 @@ def main(): ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elb_network_lb.py b/plugins/modules/elb_network_lb.py index 069882dc90c..76e2454aa65 100644 --- a/plugins/modules/elb_network_lb.py +++ b/plugins/modules/elb_network_lb.py @@ -349,10 +349,12 @@ def create_or_update_elb(elb_obj): # Tags - only need to play with tags if tags parameter has been set to something if elb_obj.tags is not None: - # Delete necessary tags - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']), - boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags) + tags_need_modify, tags_to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(elb_obj.elb["tags"]), + boto3_tag_list_to_ansible_dict(elb_obj.tags), + elb_obj.purge_tags, + ) if tags_to_delete: elb_obj.delete_tags(tags_to_delete) @@ -369,25 +371,29 @@ def create_or_update_elb(elb_obj): elb_obj.modify_elb_attributes() # Listeners - listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn']) + listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb["LoadBalancerArn"]) listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() # Delete listeners for listener_to_delete in listeners_to_delete: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener( + elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb["LoadBalancerArn"] + ) listener_obj.delete() listeners_obj.changed = True # Add listeners for listener_to_add in listeners_to_add: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb["LoadBalancerArn"]) listener_obj.add() listeners_obj.changed = True # Modify listeners for listener_to_modify in listeners_to_modify: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener( + elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb["LoadBalancerArn"] + ) listener_obj.modify() listeners_obj.changed = True @@ -396,8 +402,8 @@ def create_or_update_elb(elb_obj): elb_obj.changed = True # Update ELB ip address type only if option has been provided - if elb_obj.module.params.get('ip_address_type') is not None: - elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) + if elb_obj.module.params.get("ip_address_type") is not None: + elb_obj.modify_ip_address_type(elb_obj.module.params.get("ip_address_type")) # Update the objects to pickup changes # Get the ELB again @@ -410,24 +416,20 @@ def create_or_update_elb(elb_obj): # Convert to snake_case and merge in everything we want to return to the user snaked_elb = camel_dict_to_snake_dict(elb_obj.elb) snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes)) - snaked_elb['listeners'] = [] + snaked_elb["listeners"] = [] for listener in listeners_obj.current_listeners: - snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener)) + snaked_elb["listeners"].append(camel_dict_to_snake_dict(listener)) # Change tags to ansible friendly dict - snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) + snaked_elb["tags"] = boto3_tag_list_to_ansible_dict(snaked_elb["tags"]) # ip address type - snaked_elb['ip_address_type'] = elb_obj.get_elb_ip_address_type() + snaked_elb["ip_address_type"] = elb_obj.get_elb_ip_address_type() - elb_obj.module.exit_json( - changed=elb_obj.changed, - load_balancer=snaked_elb, - **snaked_elb) + elb_obj.module.exit_json(changed=elb_obj.changed, load_balancer=snaked_elb, **snaked_elb) def delete_elb(elb_obj): - if elb_obj.elb: elb_obj.delete() @@ -435,42 +437,42 @@ def delete_elb(elb_obj): def main(): - - argument_spec = ( - dict( - cross_zone_load_balancing=dict(type='bool'), - deletion_protection=dict(type='bool'), - listeners=dict(type='list', - elements='dict', - options=dict( - Protocol=dict(type='str', required=True), - Port=dict(type='int', required=True), - SslPolicy=dict(type='str'), - Certificates=dict(type='list', elements='dict'), - DefaultActions=dict(type='list', required=True, elements='dict') - ) - ), - name=dict(required=True, type='str'), - purge_listeners=dict(default=True, type='bool'), - purge_tags=dict(default=True, type='bool'), - subnets=dict(type='list', elements='str'), - subnet_mappings=dict(type='list', elements='dict'), - scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), - state=dict(choices=['present', 'absent'], type='str', default='present'), - tags=dict(type='dict', aliases=['resource_tags']), - wait_timeout=dict(type='int'), - wait=dict(type='bool'), - ip_address_type=dict(type='str', choices=['ipv4', 'dualstack']) - ) + argument_spec = dict( + cross_zone_load_balancing=dict(type="bool"), + deletion_protection=dict(type="bool"), + listeners=dict( + type="list", + elements="dict", + options=dict( + Protocol=dict(type="str", required=True), + Port=dict(type="int", required=True), + SslPolicy=dict(type="str"), + Certificates=dict(type="list", elements="dict"), + DefaultActions=dict(type="list", required=True, elements="dict"), + ), + ), + name=dict(required=True, type="str"), + purge_listeners=dict(default=True, type="bool"), + purge_tags=dict(default=True, type="bool"), + subnets=dict(type="list", elements="str"), + subnet_mappings=dict(type="list", elements="dict"), + scheme=dict(default="internet-facing", choices=["internet-facing", "internal"]), + state=dict(choices=["present", "absent"], type="str", default="present"), + tags=dict(type="dict", aliases=["resource_tags"]), + wait_timeout=dict(type="int"), + wait=dict(type="bool"), + ip_address_type=dict(type="str", choices=["ipv4", "dualstack"]), ) required_if = [ - ('state', 'present', ('subnets', 'subnet_mappings',), True) + ["state", "present", ["subnets", "subnet_mappings"], True], ] - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=required_if, - mutually_exclusive=[['subnets', 'subnet_mappings']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=required_if, + mutually_exclusive=[["subnets", "subnet_mappings"]], + ) # Check for subnets or subnet_mappings if state is present state = module.params.get("state") @@ -480,20 +482,20 @@ def main(): if listeners is not None: for listener in listeners: for key in listener.keys(): - protocols_list = ['TCP', 'TLS', 'UDP', 'TCP_UDP'] - if key == 'Protocol' and listener[key] not in protocols_list: + protocols_list = ["TCP", "TLS", "UDP", "TCP_UDP"] + if key == "Protocol" and listener[key] not in protocols_list: module.fail_json(msg="'Protocol' must be either " + ", ".join(protocols_list)) - connection = module.client('elbv2') - connection_ec2 = module.client('ec2') + connection = module.client("elbv2") + connection_ec2 = module.client("ec2") elb = NetworkLoadBalancer(connection, connection_ec2, module) - if state == 'present': + if state == "present": create_or_update_elb(elb) else: delete_elb(elb) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elb_target.py b/plugins/modules/elb_target.py index fd05cd67a3a..cab7b10aef8 100644 --- a/plugins/modules/elb_target.py +++ b/plugins/modules/elb_target.py @@ -127,24 +127,23 @@ from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound']) +@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=["TargetGroupNotFound"]) def describe_target_groups_with_backoff(connection, tg_name): return connection.describe_target_groups(Names=[tg_name]) def convert_tg_name_to_arn(connection, module, tg_name): - try: response = describe_target_groups_with_backoff(connection, tg_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to describe target group {0}".format(tg_name)) - tg_arn = response['TargetGroups'][0]['TargetGroupArn'] + tg_arn = response["TargetGroups"][0]["TargetGroupArn"] return tg_arn -@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound']) +@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=["TargetGroupNotFound"]) def describe_targets_with_backoff(connection, tg_arn, target): if target is None: tg = [] @@ -155,7 +154,6 @@ def describe_targets_with_backoff(connection, tg_arn, target): def describe_targets(connection, module, tg_arn, target=None): - """ Describe targets in a target group @@ -167,7 +165,7 @@ def describe_targets(connection, module, tg_arn, target=None): """ try: - targets = describe_targets_with_backoff(connection, tg_arn, target)['TargetHealthDescriptions'] + targets = describe_targets_with_backoff(connection, tg_arn, target)["TargetHealthDescriptions"] if not targets: return {} return targets[0] @@ -181,7 +179,6 @@ def register_target_with_backoff(connection, target_group_arn, target): def register_target(connection, module): - """ Registers a target to a target group @@ -203,26 +200,32 @@ def register_target(connection, module): target = dict(Id=target_id) if target_az: - target['AvailabilityZone'] = target_az + target["AvailabilityZone"] = target_az if target_port: - target['Port'] = target_port + target["Port"] = target_port target_description = describe_targets(connection, module, target_group_arn, target) - if 'Reason' in target_description['TargetHealth']: - if target_description['TargetHealth']['Reason'] == "Target.NotRegistered": + if "Reason" in target_description["TargetHealth"]: + if target_description["TargetHealth"]["Reason"] == "Target.NotRegistered": try: register_target_with_backoff(connection, target_group_arn, target) changed = True if target_status: - target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout) + target_status_check( + connection, module, target_group_arn, target, target_status, target_status_timeout + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to deregister target {0}".format(target)) # Get all targets for the target group target_descriptions = describe_targets(connection, module, target_group_arn) - module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn) + module.exit_json( + changed=changed, + target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), + target_group_arn=target_group_arn, + ) @AWSRetry.jittered_backoff(retries=10, delay=10) @@ -231,7 +234,6 @@ def deregister_target_with_backoff(connection, target_group_arn, target): def deregister_target(connection, module): - """ Deregisters a target to a target group @@ -253,18 +255,18 @@ def deregister_target(connection, module): target = dict(Id=target_id) if target_port: - target['Port'] = target_port + target["Port"] = target_port target_description = describe_targets(connection, module, target_group_arn, target) - current_target_state = target_description['TargetHealth']['State'] - current_target_reason = target_description['TargetHealth'].get('Reason') + current_target_state = target_description["TargetHealth"]["State"] + current_target_reason = target_description["TargetHealth"].get("Reason") needs_deregister = False - if deregister_unused and current_target_state == 'unused': - if current_target_reason != 'Target.NotRegistered': + if deregister_unused and current_target_state == "unused": + if current_target_reason != "Target.NotRegistered": needs_deregister = True - elif current_target_state not in ['unused', 'draining']: + elif current_target_state not in ["unused", "draining"]: needs_deregister = True if needs_deregister: @@ -274,9 +276,11 @@ def deregister_target(connection, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Unable to deregister target {0}".format(target)) else: - if current_target_reason != 'Target.NotRegistered' and current_target_state != 'draining': - module.warn(warning="Your specified target has an 'unused' state but is still registered to the target group. " + - "To force deregistration use the 'deregister_unused' option.") + if current_target_reason != "Target.NotRegistered" and current_target_state != "draining": + module.warn( + warning="Your specified target has an 'unused' state but is still registered to the target group. " + + "To force deregistration use the 'deregister_unused' option." + ) if target_status: target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout) @@ -284,53 +288,62 @@ def deregister_target(connection, module): # Get all targets for the target group target_descriptions = describe_targets(connection, module, target_group_arn) - module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn) + module.exit_json( + changed=changed, + target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), + target_group_arn=target_group_arn, + ) def target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout): reached_state = False timeout = target_status_timeout + time() while time() < timeout: - health_state = describe_targets(connection, module, target_group_arn, target)['TargetHealth']['State'] + health_state = describe_targets(connection, module, target_group_arn, target)["TargetHealth"]["State"] if health_state == target_status: reached_state = True break sleep(1) if not reached_state: - module.fail_json(msg='Status check timeout of {0} exceeded, last status was {1}: '.format(target_status_timeout, health_state)) + module.fail_json( + msg="Status check timeout of {0} exceeded, last status was {1}: ".format( + target_status_timeout, health_state + ) + ) def main(): - argument_spec = dict( - deregister_unused=dict(type='bool', default=False), - target_az=dict(type='str'), - target_group_arn=dict(type='str'), - target_group_name=dict(type='str'), - target_id=dict(type='str', required=True), - target_port=dict(type='int'), - target_status=dict(choices=['initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable'], type='str'), - target_status_timeout=dict(type='int', default=60), - state=dict(required=True, choices=['present', 'absent'], type='str'), + deregister_unused=dict(type="bool", default=False), + target_az=dict(type="str"), + target_group_arn=dict(type="str"), + target_group_name=dict(type="str"), + target_id=dict(type="str", required=True), + target_port=dict(type="int"), + target_status=dict( + choices=["initial", "healthy", "unhealthy", "unused", "draining", "unavailable"], type="str" + ), + target_status_timeout=dict(type="int", default=60), + state=dict(required=True, choices=["present", "absent"], type="str"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['target_group_arn', 'target_group_name']], + mutually_exclusive=[["target_group_arn", "target_group_name"]], ) try: - connection = module.client('elbv2') + connection = module.client("elbv2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") state = module.params.get("state") - if state == 'present': + if state == "present": register_target(connection, module) else: deregister_target(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elb_target_group.py b/plugins/modules/elb_target_group.py index 16cafc958e3..bbab1507d2d 100644 --- a/plugins/modules/elb_target_group.py +++ b/plugins/modules/elb_target_group.py @@ -459,45 +459,52 @@ def get_tg_attributes(connection, module, tg_arn): try: _attributes = connection.describe_target_group_attributes(TargetGroupArn=tg_arn, aws_retry=True) - tg_attributes = boto3_tag_list_to_ansible_dict(_attributes['Attributes']) + tg_attributes = boto3_tag_list_to_ansible_dict(_attributes["Attributes"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group attributes") # Replace '.' with '_' in attribute key names to make it more Ansible friendly - return dict((k.replace('.', '_'), v) for k, v in tg_attributes.items()) + return dict((k.replace(".", "_"), v) for k, v in tg_attributes.items()) def get_target_group_tags(connection, module, target_group_arn): try: _tags = connection.describe_tags(ResourceArns=[target_group_arn], aws_retry=True) - return _tags['TagDescriptions'][0]['Tags'] + return _tags["TagDescriptions"][0]["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group tags") def get_target_group(connection, module, retry_missing=False): - extra_codes = ['TargetGroupNotFound'] if retry_missing else [] + extra_codes = ["TargetGroupNotFound"] if retry_missing else [] try: - target_group_paginator = connection.get_paginator('describe_target_groups').paginate(Names=[module.params.get("name")]) + target_group_paginator = connection.get_paginator("describe_target_groups").paginate( + Names=[module.params.get("name")] + ) jittered_retry = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=extra_codes) result = jittered_retry(target_group_paginator.build_full_result)() - except is_boto3_error_code('TargetGroupNotFound'): + except is_boto3_error_code("TargetGroupNotFound"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get target group") - return result['TargetGroups'][0] + return result["TargetGroups"][0] def wait_for_status(connection, module, target_group_arn, targets, status): polling_increment_secs = 5 - max_retries = (module.params.get('wait_timeout') // polling_increment_secs) + max_retries = module.params.get("wait_timeout") // polling_increment_secs status_achieved = False for x in range(0, max_retries): try: - response = connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True) - if response['TargetHealthDescriptions'][0]['TargetHealth']['State'] == status: + response = connection.describe_target_health( + TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True + ) + if response["TargetHealthDescriptions"][0]["TargetHealth"]["State"] == status: status_achieved = True break else: @@ -527,172 +534,204 @@ def create_or_update_attributes(connection, module, target_group, new_target_gro update_attributes = [] # Get current attributes - current_tg_attributes = get_tg_attributes(connection, module, target_group['TargetGroupArn']) + current_tg_attributes = get_tg_attributes(connection, module, target_group["TargetGroupArn"]) if deregistration_delay_timeout is not None: - if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']: - update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)}) + if str(deregistration_delay_timeout) != current_tg_attributes["deregistration_delay_timeout_seconds"]: + update_attributes.append( + {"Key": "deregistration_delay.timeout_seconds", "Value": str(deregistration_delay_timeout)} + ) if deregistration_connection_termination is not None: - if deregistration_connection_termination and current_tg_attributes.get('deregistration_delay_connection_termination_enabled') != "true": - update_attributes.append({'Key': 'deregistration_delay.connection_termination.enabled', 'Value': 'true'}) + if ( + deregistration_connection_termination + and current_tg_attributes.get("deregistration_delay_connection_termination_enabled") != "true" + ): + update_attributes.append({"Key": "deregistration_delay.connection_termination.enabled", "Value": "true"}) if stickiness_enabled is not None: - if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true": - update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'}) + if stickiness_enabled and current_tg_attributes["stickiness_enabled"] != "true": + update_attributes.append({"Key": "stickiness.enabled", "Value": "true"}) if stickiness_lb_cookie_duration is not None: - if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']: - update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)}) + if str(stickiness_lb_cookie_duration) != current_tg_attributes["stickiness_lb_cookie_duration_seconds"]: + update_attributes.append( + {"Key": "stickiness.lb_cookie.duration_seconds", "Value": str(stickiness_lb_cookie_duration)} + ) if stickiness_type is not None: - if stickiness_type != current_tg_attributes.get('stickiness_type'): - update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type}) + if stickiness_type != current_tg_attributes.get("stickiness_type"): + update_attributes.append({"Key": "stickiness.type", "Value": stickiness_type}) if stickiness_app_cookie_name is not None: - if stickiness_app_cookie_name != current_tg_attributes.get('stickiness_app_cookie_name'): - update_attributes.append({'Key': 'stickiness.app_cookie.cookie_name', 'Value': str(stickiness_app_cookie_name)}) + if stickiness_app_cookie_name != current_tg_attributes.get("stickiness_app_cookie_name"): + update_attributes.append( + {"Key": "stickiness.app_cookie.cookie_name", "Value": str(stickiness_app_cookie_name)} + ) if stickiness_app_cookie_duration is not None: - if str(stickiness_app_cookie_duration) != current_tg_attributes['stickiness_app_cookie_duration_seconds']: - update_attributes.append({'Key': 'stickiness.app_cookie.duration_seconds', 'Value': str(stickiness_app_cookie_duration)}) + if str(stickiness_app_cookie_duration) != current_tg_attributes["stickiness_app_cookie_duration_seconds"]: + update_attributes.append( + {"Key": "stickiness.app_cookie.duration_seconds", "Value": str(stickiness_app_cookie_duration)} + ) if preserve_client_ip_enabled is not None: - if target_type not in ('udp', 'tcp_udp'): - if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get('preserve_client_ip_enabled'): - update_attributes.append({'Key': 'preserve_client_ip.enabled', 'Value': str(preserve_client_ip_enabled).lower()}) + if target_type not in ("udp", "tcp_udp"): + if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get("preserve_client_ip_enabled"): + update_attributes.append( + {"Key": "preserve_client_ip.enabled", "Value": str(preserve_client_ip_enabled).lower()} + ) if proxy_protocol_v2_enabled is not None: - if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get('proxy_protocol_v2_enabled'): - update_attributes.append({'Key': 'proxy_protocol_v2.enabled', 'Value': str(proxy_protocol_v2_enabled).lower()}) + if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get("proxy_protocol_v2_enabled"): + update_attributes.append( + {"Key": "proxy_protocol_v2.enabled", "Value": str(proxy_protocol_v2_enabled).lower()} + ) if load_balancing_algorithm_type is not None: - if str(load_balancing_algorithm_type) != current_tg_attributes['load_balancing_algorithm_type']: - update_attributes.append({'Key': 'load_balancing.algorithm.type', 'Value': str(load_balancing_algorithm_type)}) + if str(load_balancing_algorithm_type) != current_tg_attributes["load_balancing_algorithm_type"]: + update_attributes.append( + {"Key": "load_balancing.algorithm.type", "Value": str(load_balancing_algorithm_type)} + ) if update_attributes: try: - connection.modify_target_group_attributes(TargetGroupArn=target_group['TargetGroupArn'], Attributes=update_attributes, aws_retry=True) + connection.modify_target_group_attributes( + TargetGroupArn=target_group["TargetGroupArn"], Attributes=update_attributes, aws_retry=True + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state if new_target_group: - connection.delete_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True) + connection.delete_target_group(TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True) module.fail_json_aws(e, msg="Couldn't delete target group") return changed def create_or_update_target_group(connection, module): - changed = False new_target_group = False params = dict() target_type = module.params.get("target_type") - params['Name'] = module.params.get("name") - params['TargetType'] = target_type + params["Name"] = module.params.get("name") + params["TargetType"] = target_type if target_type != "lambda": - params['Protocol'] = module.params.get("protocol").upper() - if module.params.get('protocol_version') is not None: - params['ProtocolVersion'] = module.params.get('protocol_version') - params['Port'] = module.params.get("port") - params['VpcId'] = module.params.get("vpc_id") + params["Protocol"] = module.params.get("protocol").upper() + if module.params.get("protocol_version") is not None: + params["ProtocolVersion"] = module.params.get("protocol_version") + params["Port"] = module.params.get("port") + params["VpcId"] = module.params.get("vpc_id") tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") health_option_keys = [ - "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout", - "healthy_threshold_count", "unhealthy_threshold_count", "successful_response_codes" + "health_check_path", + "health_check_protocol", + "health_check_interval", + "health_check_timeout", + "healthy_threshold_count", + "unhealthy_threshold_count", + "successful_response_codes", ] health_options = any(module.params[health_option_key] is not None for health_option_key in health_option_keys) # Set health check if anything set if health_options: - if module.params.get("health_check_protocol") is not None: - params['HealthCheckProtocol'] = module.params.get("health_check_protocol").upper() + params["HealthCheckProtocol"] = module.params.get("health_check_protocol").upper() if module.params.get("health_check_port") is not None: - params['HealthCheckPort'] = module.params.get("health_check_port") + params["HealthCheckPort"] = module.params.get("health_check_port") if module.params.get("health_check_interval") is not None: - params['HealthCheckIntervalSeconds'] = module.params.get("health_check_interval") + params["HealthCheckIntervalSeconds"] = module.params.get("health_check_interval") if module.params.get("health_check_timeout") is not None: - params['HealthCheckTimeoutSeconds'] = module.params.get("health_check_timeout") + params["HealthCheckTimeoutSeconds"] = module.params.get("health_check_timeout") if module.params.get("healthy_threshold_count") is not None: - params['HealthyThresholdCount'] = module.params.get("healthy_threshold_count") + params["HealthyThresholdCount"] = module.params.get("healthy_threshold_count") if module.params.get("unhealthy_threshold_count") is not None: - params['UnhealthyThresholdCount'] = module.params.get("unhealthy_threshold_count") + params["UnhealthyThresholdCount"] = module.params.get("unhealthy_threshold_count") # Only need to check response code and path for http(s) health checks protocol = module.params.get("health_check_protocol") - if protocol is not None and protocol.upper() in ['HTTP', 'HTTPS']: - + if protocol is not None and protocol.upper() in ["HTTP", "HTTPS"]: if module.params.get("health_check_path") is not None: - params['HealthCheckPath'] = module.params.get("health_check_path") + params["HealthCheckPath"] = module.params.get("health_check_path") if module.params.get("successful_response_codes") is not None: - params['Matcher'] = {} - code_key = 'HttpCode' - protocol_version = module.params.get('protocol_version') + params["Matcher"] = {} + code_key = "HttpCode" + protocol_version = module.params.get("protocol_version") if protocol_version is not None and protocol_version.upper() == "GRPC": - code_key = 'GrpcCode' - params['Matcher'][code_key] = module.params.get("successful_response_codes") + code_key = "GrpcCode" + params["Matcher"][code_key] = module.params.get("successful_response_codes") # Get target group target_group = get_target_group(connection, module) if target_group: - diffs = [param for param in ('Port', 'Protocol', 'VpcId') - if target_group.get(param) != params.get(param)] + diffs = [param for param in ("Port", "Protocol", "VpcId") if target_group.get(param) != params.get(param)] if diffs: - module.fail_json(msg="Cannot modify %s parameter(s) for a target group" % - ", ".join(diffs)) + module.fail_json(msg="Cannot modify %s parameter(s) for a target group" % ", ".join(diffs)) # Target group exists so check health check parameters match what has been passed health_check_params = dict() # Modify health check if anything set if health_options: - # Health check protocol - if 'HealthCheckProtocol' in params and target_group['HealthCheckProtocol'] != params['HealthCheckProtocol']: - health_check_params['HealthCheckProtocol'] = params['HealthCheckProtocol'] + if "HealthCheckProtocol" in params and target_group["HealthCheckProtocol"] != params["HealthCheckProtocol"]: + health_check_params["HealthCheckProtocol"] = params["HealthCheckProtocol"] # Health check port - if 'HealthCheckPort' in params and target_group['HealthCheckPort'] != params['HealthCheckPort']: - health_check_params['HealthCheckPort'] = params['HealthCheckPort'] + if "HealthCheckPort" in params and target_group["HealthCheckPort"] != params["HealthCheckPort"]: + health_check_params["HealthCheckPort"] = params["HealthCheckPort"] # Health check interval - if 'HealthCheckIntervalSeconds' in params and target_group['HealthCheckIntervalSeconds'] != params['HealthCheckIntervalSeconds']: - health_check_params['HealthCheckIntervalSeconds'] = params['HealthCheckIntervalSeconds'] + if ( + "HealthCheckIntervalSeconds" in params + and target_group["HealthCheckIntervalSeconds"] != params["HealthCheckIntervalSeconds"] + ): + health_check_params["HealthCheckIntervalSeconds"] = params["HealthCheckIntervalSeconds"] # Health check timeout - if 'HealthCheckTimeoutSeconds' in params and target_group['HealthCheckTimeoutSeconds'] != params['HealthCheckTimeoutSeconds']: - health_check_params['HealthCheckTimeoutSeconds'] = params['HealthCheckTimeoutSeconds'] + if ( + "HealthCheckTimeoutSeconds" in params + and target_group["HealthCheckTimeoutSeconds"] != params["HealthCheckTimeoutSeconds"] + ): + health_check_params["HealthCheckTimeoutSeconds"] = params["HealthCheckTimeoutSeconds"] # Healthy threshold - if 'HealthyThresholdCount' in params and target_group['HealthyThresholdCount'] != params['HealthyThresholdCount']: - health_check_params['HealthyThresholdCount'] = params['HealthyThresholdCount'] + if ( + "HealthyThresholdCount" in params + and target_group["HealthyThresholdCount"] != params["HealthyThresholdCount"] + ): + health_check_params["HealthyThresholdCount"] = params["HealthyThresholdCount"] # Unhealthy threshold - if 'UnhealthyThresholdCount' in params and target_group['UnhealthyThresholdCount'] != params['UnhealthyThresholdCount']: - health_check_params['UnhealthyThresholdCount'] = params['UnhealthyThresholdCount'] + if ( + "UnhealthyThresholdCount" in params + and target_group["UnhealthyThresholdCount"] != params["UnhealthyThresholdCount"] + ): + health_check_params["UnhealthyThresholdCount"] = params["UnhealthyThresholdCount"] # Only need to check response code and path for http(s) health checks - if target_group['HealthCheckProtocol'] in ['HTTP', 'HTTPS']: + if target_group["HealthCheckProtocol"] in ["HTTP", "HTTPS"]: # Health check path - if 'HealthCheckPath' in params and target_group['HealthCheckPath'] != params['HealthCheckPath']: - health_check_params['HealthCheckPath'] = params['HealthCheckPath'] + if "HealthCheckPath" in params and target_group["HealthCheckPath"] != params["HealthCheckPath"]: + health_check_params["HealthCheckPath"] = params["HealthCheckPath"] # Matcher (successful response codes) # TODO: required and here? - if 'Matcher' in params: - code_key = 'HttpCode' - if target_group['ProtocolVersion'] == 'GRPC': - code_key = 'GrpcCode' - current_matcher_list = target_group['Matcher'][code_key].split(',') - requested_matcher_list = params['Matcher'][code_key].split(',') + if "Matcher" in params: + code_key = "HttpCode" + if target_group["ProtocolVersion"] == "GRPC": + code_key = "GrpcCode" + current_matcher_list = target_group["Matcher"][code_key].split(",") + requested_matcher_list = params["Matcher"][code_key].split(",") if set(current_matcher_list) != set(requested_matcher_list): - health_check_params['Matcher'] = {} - health_check_params['Matcher'][code_key] = ','.join(requested_matcher_list) + health_check_params["Matcher"] = {} + health_check_params["Matcher"][code_key] = ",".join(requested_matcher_list) try: if health_check_params: - connection.modify_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True, **health_check_params) + connection.modify_target_group( + TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True, **health_check_params + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't update target group") @@ -703,27 +742,27 @@ def create_or_update_target_group(connection, module): # describe_target_health seems to be the only way to get them try: current_targets = connection.describe_target_health( - TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True) + TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group health") if module.params.get("targets"): - if target_type != "lambda": - params['Targets'] = module.params.get("targets") + params["Targets"] = module.params.get("targets") # Correct type of target ports - for target in params['Targets']: - target['Port'] = int(target.get('Port', module.params.get('port'))) + for target in params["Targets"]: + target["Port"] = int(target.get("Port", module.params.get("port"))) current_instance_ids = [] - for instance in current_targets['TargetHealthDescriptions']: - current_instance_ids.append(instance['Target']['Id']) + for instance in current_targets["TargetHealthDescriptions"]: + current_instance_ids.append(instance["Target"]["Id"]) new_instance_ids = [] - for instance in params['Targets']: - new_instance_ids.append(instance['Id']) + for instance in params["Targets"]: + new_instance_ids.append(instance["Id"]) add_instances = set(new_instance_ids) - set(current_instance_ids) @@ -738,37 +777,49 @@ def create_or_update_target_group(connection, module): changed = True try: - connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_add, aws_retry=True) + connection.register_targets( + TargetGroupArn=target_group["TargetGroupArn"], Targets=instances_to_add, aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't register targets") if module.params.get("wait"): status_achieved, registered_instances = wait_for_status( - connection, module, target_group['TargetGroupArn'], instances_to_add, 'healthy') + connection, module, target_group["TargetGroupArn"], instances_to_add, "healthy" + ) if not status_achieved: module.fail_json( - msg='Error waiting for target registration to be healthy - please check the AWS console') + msg="Error waiting for target registration to be healthy - please check the AWS console" + ) remove_instances = set(current_instance_ids) - set(new_instance_ids) if remove_instances: instances_to_remove = [] - for target in current_targets['TargetHealthDescriptions']: - if target['Target']['Id'] in remove_instances: - instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']}) + for target in current_targets["TargetHealthDescriptions"]: + if target["Target"]["Id"] in remove_instances: + instances_to_remove.append( + {"Id": target["Target"]["Id"], "Port": target["Target"]["Port"]} + ) changed = True try: - connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) + connection.deregister_targets( + TargetGroupArn=target_group["TargetGroupArn"], + Targets=instances_to_remove, + aws_retry=True, + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove targets") if module.params.get("wait"): status_achieved, registered_instances = wait_for_status( - connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused') + connection, module, target_group["TargetGroupArn"], instances_to_remove, "unused" + ) if not status_achieved: module.fail_json( - msg='Error waiting for target deregistration - please check the AWS console') + msg="Error waiting for target deregistration - please check the AWS console" + ) # register lambda target else: @@ -786,40 +837,40 @@ def create_or_update_target_group(connection, module): if changed: if target.get("Id"): response = connection.register_targets( - TargetGroupArn=target_group['TargetGroupArn'], - Targets=[ - { - "Id": target['Id'] - } - ], - aws_retry=True + TargetGroupArn=target_group["TargetGroupArn"], + Targets=[{"Id": target["Id"]}], + aws_retry=True, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't register targets") + module.fail_json_aws(e, msg="Couldn't register targets") else: if target_type != "lambda": - - current_instances = current_targets['TargetHealthDescriptions'] + current_instances = current_targets["TargetHealthDescriptions"] if current_instances: instances_to_remove = [] - for target in current_targets['TargetHealthDescriptions']: - instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']}) + for target in current_targets["TargetHealthDescriptions"]: + instances_to_remove.append({"Id": target["Target"]["Id"], "Port": target["Target"]["Port"]}) changed = True try: - connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) + connection.deregister_targets( + TargetGroupArn=target_group["TargetGroupArn"], + Targets=instances_to_remove, + aws_retry=True, + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove targets") if module.params.get("wait"): status_achieved, registered_instances = wait_for_status( - connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused') + connection, module, target_group["TargetGroupArn"], instances_to_remove, "unused" + ) if not status_achieved: module.fail_json( - msg='Error waiting for target deregistration - please check the AWS console') + msg="Error waiting for target deregistration - please check the AWS console" + ) # remove lambda targets else: @@ -830,7 +881,10 @@ def create_or_update_target_group(connection, module): target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"] if changed: connection.deregister_targets( - TargetGroupArn=target_group['TargetGroupArn'], Targets=[{"Id": target_to_remove}], aws_retry=True) + TargetGroupArn=target_group["TargetGroupArn"], + Targets=[{"Id": target_to_remove}], + aws_retry=True, + ) else: try: connection.create_target_group(aws_retry=True, **params) @@ -843,33 +897,32 @@ def create_or_update_target_group(connection, module): if module.params.get("targets"): if target_type != "lambda": - params['Targets'] = module.params.get("targets") + params["Targets"] = module.params.get("targets") try: - connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=params['Targets'], aws_retry=True) + connection.register_targets( + TargetGroupArn=target_group["TargetGroupArn"], Targets=params["Targets"], aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't register targets") if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status(connection, module, target_group['TargetGroupArn'], params['Targets'], 'healthy') + status_achieved, registered_instances = wait_for_status( + connection, module, target_group["TargetGroupArn"], params["Targets"], "healthy" + ) if not status_achieved: - module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console') + module.fail_json( + msg="Error waiting for target registration to be healthy - please check the AWS console" + ) else: try: target = module.params.get("targets")[0] response = connection.register_targets( - TargetGroupArn=target_group['TargetGroupArn'], - Targets=[ - { - "Id": target["Id"] - } - ], - aws_retry=True + TargetGroupArn=target_group["TargetGroupArn"], Targets=[{"Id": target["Id"]}], aws_retry=True ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't register targets") + module.fail_json_aws(e, msg="Couldn't register targets") attributes_update = create_or_update_attributes(connection, module, target_group, new_target_group) @@ -879,13 +932,17 @@ def create_or_update_target_group(connection, module): # Tags - only need to play with tags if tags parameter has been set to something if tags is not None: # Get tags - current_tags = get_target_group_tags(connection, module, target_group['TargetGroupArn']) + current_tags = get_target_group_tags(connection, module, target_group["TargetGroupArn"]) # Delete necessary tags - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags) + tags_need_modify, tags_to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags + ) if tags_to_delete: try: - connection.remove_tags(ResourceArns=[target_group['TargetGroupArn']], TagKeys=tags_to_delete, aws_retry=True) + connection.remove_tags( + ResourceArns=[target_group["TargetGroupArn"]], TagKeys=tags_to_delete, aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete tags from target group") changed = True @@ -893,7 +950,11 @@ def create_or_update_target_group(connection, module): # Add/update tags if tags_need_modify: try: - connection.add_tags(ResourceArns=[target_group['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), aws_retry=True) + connection.add_tags( + ResourceArns=[target_group["TargetGroupArn"]], + Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), + aws_retry=True, + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add tags to target group") changed = True @@ -902,12 +963,14 @@ def create_or_update_target_group(connection, module): target_group = get_target_group(connection, module) # Get the target group attributes again - target_group.update(get_tg_attributes(connection, module, target_group['TargetGroupArn'])) + target_group.update(get_tg_attributes(connection, module, target_group["TargetGroupArn"])) # Convert target_group to snake_case snaked_tg = camel_dict_to_snake_dict(target_group) - snaked_tg['tags'] = boto3_tag_list_to_ansible_dict(get_target_group_tags(connection, module, target_group['TargetGroupArn'])) + snaked_tg["tags"] = boto3_tag_list_to_ansible_dict( + get_target_group_tags(connection, module, target_group["TargetGroupArn"]) + ) module.exit_json(changed=changed, **snaked_tg) @@ -918,7 +981,7 @@ def delete_target_group(connection, module): if tg: try: - connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True) + connection.delete_target_group(TargetGroupArn=tg["TargetGroupArn"], aws_retry=True) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete target group") @@ -927,66 +990,69 @@ def delete_target_group(connection, module): def main(): - protocols_list = ['http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', - 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] + protocols_list = ["http", "https", "tcp", "tls", "udp", "tcp_udp", "HTTP", "HTTPS", "TCP", "TLS", "UDP", "TCP_UDP"] argument_spec = dict( - deregistration_delay_timeout=dict(type='int'), - deregistration_connection_termination=dict(type='bool', default=False), + deregistration_delay_timeout=dict(type="int"), + deregistration_connection_termination=dict(type="bool", default=False), health_check_protocol=dict(choices=protocols_list), health_check_port=dict(), health_check_path=dict(), - health_check_interval=dict(type='int'), - health_check_timeout=dict(type='int'), - healthy_threshold_count=dict(type='int'), - modify_targets=dict(default=True, type='bool'), + health_check_interval=dict(type="int"), + health_check_timeout=dict(type="int"), + healthy_threshold_count=dict(type="int"), + modify_targets=dict(default=True, type="bool"), name=dict(required=True), - port=dict(type='int'), + port=dict(type="int"), protocol=dict(choices=protocols_list), - protocol_version=dict(type='str', choices=['GRPC', 'HTTP1', 'HTTP2']), - purge_tags=dict(default=True, type='bool'), - stickiness_enabled=dict(type='bool'), + protocol_version=dict(type="str", choices=["GRPC", "HTTP1", "HTTP2"]), + purge_tags=dict(default=True, type="bool"), + stickiness_enabled=dict(type="bool"), stickiness_type=dict(), - stickiness_lb_cookie_duration=dict(type='int'), - stickiness_app_cookie_duration=dict(type='int'), + stickiness_lb_cookie_duration=dict(type="int"), + stickiness_app_cookie_duration=dict(type="int"), stickiness_app_cookie_name=dict(), - load_balancing_algorithm_type=dict(type='str', choices=['round_robin', 'least_outstanding_requests']), - state=dict(required=True, choices=['present', 'absent']), + load_balancing_algorithm_type=dict(type="str", choices=["round_robin", "least_outstanding_requests"]), + state=dict(required=True, choices=["present", "absent"]), successful_response_codes=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - target_type=dict(choices=['instance', 'ip', 'lambda', 'alb']), - targets=dict(type='list', elements='dict'), - unhealthy_threshold_count=dict(type='int'), + tags=dict(type="dict", aliases=["resource_tags"]), + target_type=dict(choices=["instance", "ip", "lambda", "alb"]), + targets=dict(type="list", elements="dict"), + unhealthy_threshold_count=dict(type="int"), vpc_id=dict(), - preserve_client_ip_enabled=dict(type='bool'), - proxy_protocol_v2_enabled=dict(type='bool'), - wait_timeout=dict(type='int', default=200), - wait=dict(type='bool', default=False) + preserve_client_ip_enabled=dict(type="bool"), + proxy_protocol_v2_enabled=dict(type="bool"), + wait_timeout=dict(type="int", default=200), + wait=dict(type="bool", default=False), ) required_by = dict( - health_check_path=['health_check_protocol'], - successful_response_codes=['health_check_protocol'], + health_check_path=["health_check_protocol"], + successful_response_codes=["health_check_protocol"], ) required_if = [ - ['target_type', 'instance', ['protocol', 'port', 'vpc_id']], - ['target_type', 'ip', ['protocol', 'port', 'vpc_id']], - ['target_type', 'alb', ['protocol', 'port', 'vpc_id']], + ["target_type", "instance", ["protocol", "port", "vpc_id"]], + ["target_type", "ip", ["protocol", "port", "vpc_id"]], + ["target_type", "alb", ["protocol", "port", "vpc_id"]], ] module = AnsibleAWSModule(argument_spec=argument_spec, required_by=required_by, required_if=required_if) - if module.params.get('target_type') is None: - module.params['target_type'] = 'instance' + if module.params.get("target_type") is None: + module.params["target_type"] = "instance" - connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - if module.params.get('state') == 'present': - if module.params.get('protocol') in ['http', 'https', 'HTTP', 'HTTPS'] and module.params.get('deregistration_connection_termination', None): - module.fail_json(msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination") + if module.params.get("state") == "present": + if module.params.get("protocol") in ["http", "https", "HTTP", "HTTPS"] and module.params.get( + "deregistration_connection_termination", None + ): + module.fail_json( + msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination" + ) create_or_update_target_group(connection, module) else: delete_target_group(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elb_target_group_info.py b/plugins/modules/elb_target_group_info.py index d58c2f248f5..bf02db21f15 100644 --- a/plugins/modules/elb_target_group_info.py +++ b/plugins/modules/elb_target_group_info.py @@ -220,40 +220,39 @@ @AWSRetry.jittered_backoff(retries=10) def get_paginator(**kwargs): - paginator = client.get_paginator('describe_target_groups') + paginator = client.get_paginator("describe_target_groups") return paginator.paginate(**kwargs).build_full_result() def get_target_group_attributes(target_group_arn): - try: - target_group_attributes = boto3_tag_list_to_ansible_dict(client.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes']) + target_group_attributes = boto3_tag_list_to_ansible_dict( + client.describe_target_group_attributes(TargetGroupArn=target_group_arn)["Attributes"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe target group attributes") # Replace '.' with '_' in attribute key names to make it more Ansibley - return dict((k.replace('.', '_'), v) - for (k, v) in target_group_attributes.items()) + return dict((k.replace(".", "_"), v) for (k, v) in target_group_attributes.items()) def get_target_group_tags(target_group_arn): - try: - return boto3_tag_list_to_ansible_dict(client.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags']) + return boto3_tag_list_to_ansible_dict( + client.describe_tags(ResourceArns=[target_group_arn])["TagDescriptions"][0]["Tags"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe group tags") def get_target_group_targets_health(target_group_arn): - try: - return client.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions'] + return client.describe_target_health(TargetGroupArn=target_group_arn)["TargetHealthDescriptions"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get target health") def list_target_groups(): - load_balancer_arn = module.params.get("load_balancer_arn") target_group_arns = module.params.get("target_group_arns") names = module.params.get("names") @@ -268,24 +267,29 @@ def list_target_groups(): target_groups = get_paginator(TargetGroupArns=target_group_arns) if names: target_groups = get_paginator(Names=names) - except is_boto3_error_code('TargetGroupNotFound'): + except is_boto3_error_code("TargetGroupNotFound"): module.exit_json(target_groups=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to list target groups") # Get the attributes and tags for each target group - for target_group in target_groups['TargetGroups']: - target_group.update(get_target_group_attributes(target_group['TargetGroupArn'])) + for target_group in target_groups["TargetGroups"]: + target_group.update(get_target_group_attributes(target_group["TargetGroupArn"])) # Turn the boto3 result in to ansible_friendly_snaked_names - snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']] + snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups["TargetGroups"]] # Get tags for each target group for snaked_target_group in snaked_target_groups: - snaked_target_group['tags'] = get_target_group_tags(snaked_target_group['target_group_arn']) + snaked_target_group["tags"] = get_target_group_tags(snaked_target_group["target_group_arn"]) if collect_targets_health: - snaked_target_group['targets_health_description'] = [camel_dict_to_snake_dict( - target) for target in get_target_group_targets_health(snaked_target_group['target_group_arn'])] + snaked_target_group["targets_health_description"] = [ + camel_dict_to_snake_dict(target) + for target in get_target_group_targets_health(snaked_target_group["target_group_arn"]) + ] module.exit_json(target_groups=snaked_target_groups) @@ -295,25 +299,25 @@ def main(): global client argument_spec = dict( - load_balancer_arn=dict(type='str'), - target_group_arns=dict(type='list', elements='str'), - names=dict(type='list', elements='str'), - collect_targets_health=dict(default=False, type='bool', required=False), + load_balancer_arn=dict(type="str"), + target_group_arns=dict(type="list", elements="str"), + names=dict(type="list", elements="str"), + collect_targets_health=dict(default=False, type="bool", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']], + mutually_exclusive=[["load_balancer_arn", "target_group_arns", "names"]], supports_check_mode=True, ) try: - client = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + client = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_target_groups() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/elb_target_info.py b/plugins/modules/elb_target_info.py index 393e290e51b..e318f6c5b65 100644 --- a/plugins/modules/elb_target_info.py +++ b/plugins/modules/elb_target_info.py @@ -220,6 +220,7 @@ class Target(object): """Models a target in a target group""" + def __init__(self, target_id, port, az, raw_target_health): self.target_port = port self.target_id = target_id @@ -240,10 +241,7 @@ def __init__(self, **kwargs): self.targets = [] def add_target(self, target_id, target_port, target_az, raw_target_health): - self.targets.append(Target(target_id, - target_port, - target_az, - raw_target_health)) + self.targets.append(Target(target_id, target_port, target_az, raw_target_health)) def to_dict(self): object_dict = vars(self) @@ -255,28 +253,17 @@ def get_targets(self): class TargetInfoGatherer(object): - def __init__(self, module, instance_id, get_unused_target_groups): self.module = module try: - self.ec2 = self.module.client( - "ec2", - retry_decorator=AWSRetry.jittered_backoff(retries=10) - ) + self.ec2 = self.module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, - msg="Couldn't connect to ec2" - ) + self.module.fail_json_aws(e, msg="Couldn't connect to ec2") try: - self.elbv2 = self.module.client( - "elbv2", - retry_decorator=AWSRetry.jittered_backoff(retries=10) - ) + self.elbv2 = self.module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, - msg="Could not connect to elbv2" - ) + self.module.fail_json_aws(e, msg="Could not connect to elbv2") self.instance_id = instance_id self.get_unused_target_groups = get_unused_target_groups @@ -284,25 +271,19 @@ def __init__(self, module, instance_id, get_unused_target_groups): def _get_instance_ips(self): """Fetch all IPs associated with this instance so that we can determine - whether or not an instance is in an IP-based target group""" + whether or not an instance is in an IP-based target group""" try: # get ahold of the instance in the API - reservations = self.ec2.describe_instances( - InstanceIds=[self.instance_id], - aws_retry=True - )["Reservations"] + reservations = self.ec2.describe_instances(InstanceIds=[self.instance_id], aws_retry=True)["Reservations"] except (BotoCoreError, ClientError) as e: # typically this will happen if the instance doesn't exist - self.module.fail_json_aws(e, - msg="Could not get instance info" + - " for instance '%s'" % - (self.instance_id) - ) + self.module.fail_json_aws( + e, + msg="Could not get instance info for instance '%s'" % (self.instance_id), + ) if len(reservations) < 1: - self.module.fail_json( - msg="Instance ID %s could not be found" % self.instance_id - ) + self.module.fail_json(msg="Instance ID %s could not be found" % self.instance_id) instance = reservations[0]["Instances"][0] @@ -319,38 +300,36 @@ def _get_instance_ips(self): def _get_target_group_objects(self): """helper function to build a list of TargetGroup objects based on - the AWS API""" + the AWS API""" try: - paginator = self.elbv2.get_paginator( - "describe_target_groups" - ) + paginator = self.elbv2.get_paginator("describe_target_groups") tg_response = paginator.paginate().build_full_result() except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, - msg="Could not describe target" + - " groups" - ) + self.module.fail_json_aws( + e, + msg="Could not describe target groups", + ) # build list of TargetGroup objects representing every target group in # the system target_groups = [] for each_tg in tg_response["TargetGroups"]: - if not self.get_unused_target_groups and \ - len(each_tg["LoadBalancerArns"]) < 1: + if not self.get_unused_target_groups and len(each_tg["LoadBalancerArns"]) < 1: # only collect target groups that actually are connected # to LBs continue target_groups.append( - TargetGroup(target_group_arn=each_tg["TargetGroupArn"], - target_group_type=each_tg["TargetType"], - ) + TargetGroup( + target_group_arn=each_tg["TargetGroupArn"], + target_group_type=each_tg["TargetType"], + ) ) return target_groups def _get_target_descriptions(self, target_groups): """Helper function to build a list of all the target descriptions - for this target in a target group""" + for this target in a target group""" # Build a list of all the target groups pointing to this instance # based on the previous list tgs = set() @@ -358,37 +337,25 @@ def _get_target_descriptions(self, target_groups): for tg in target_groups: try: # Get the list of targets for that target group - response = self.elbv2.describe_target_health( - TargetGroupArn=tg.target_group_arn, - aws_retry=True - ) + response = self.elbv2.describe_target_health(TargetGroupArn=tg.target_group_arn, aws_retry=True) except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, - msg="Could not describe target " + - "health for target group %s" % - tg.target_group_arn - ) + self.module.fail_json_aws( + e, msg="Could not describe target " + "health for target group %s" % tg.target_group_arn + ) for t in response["TargetHealthDescriptions"]: # If the target group has this instance as a target, add to # list. This logic also accounts for the possibility of a # target being in the target group multiple times with # overridden ports - if t["Target"]["Id"] == self.instance_id or \ - t["Target"]["Id"] in self.instance_ips: - + if t["Target"]["Id"] == self.instance_id or t["Target"]["Id"] in self.instance_ips: # The 'AvailabilityZone' parameter is a weird one, see the # API docs for more. Basically it's only supposed to be # there under very specific circumstances, so we need # to account for that - az = t["Target"]["AvailabilityZone"] \ - if "AvailabilityZone" in t["Target"] \ - else None - - tg.add_target(t["Target"]["Id"], - t["Target"]["Port"], - az, - t["TargetHealth"]) + az = t["Target"]["AvailabilityZone"] if "AvailabilityZone" in t["Target"] else None + + tg.add_target(t["Target"]["Id"], t["Target"]["Port"], az, t["TargetHealth"]) # since tgs is a set, each target group will be added only # once, even though we call add on each successful match tgs.add(tg) @@ -406,8 +373,7 @@ def _get_target_groups(self): def main(): argument_spec = dict( instance_id={"required": True, "type": "str"}, - get_unused_target_groups={"required": False, - "default": True, "type": "bool"} + get_unused_target_groups={"required": False, "default": True, "type": "bool"}, ) module = AnsibleAWSModule( @@ -418,10 +384,7 @@ def main(): instance_id = module.params["instance_id"] get_unused_target_groups = module.params["get_unused_target_groups"] - tg_gatherer = TargetInfoGatherer(module, - instance_id, - get_unused_target_groups - ) + tg_gatherer = TargetInfoGatherer(module, instance_id, get_unused_target_groups) instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs] diff --git a/plugins/modules/glue_connection.py b/plugins/modules/glue_connection.py index e9a6b306dac..b1c935929f8 100644 --- a/plugins/modules/glue_connection.py +++ b/plugins/modules/glue_connection.py @@ -186,13 +186,13 @@ def _get_glue_connection(connection, module): connection_name = module.params.get("name") connection_catalog_id = module.params.get("catalog_id") - params = {'Name': connection_name} + params = {"Name": connection_name} if connection_catalog_id is not None: - params['CatalogId'] = connection_catalog_id + params["CatalogId"] = connection_catalog_id try: - return connection.get_connection(aws_retry=True, **params)['Connection'] - except is_boto3_error_code('EntityNotFoundException'): + return connection.get_connection(aws_retry=True, **params)["Connection"] + except is_boto3_error_code("EntityNotFoundException"): return None @@ -208,37 +208,50 @@ def _compare_glue_connection_params(user_params, current_params): # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description # To counter this, add the key if it's missing with a blank value - if 'Description' not in current_params: - current_params['Description'] = "" - if 'MatchCriteria' not in current_params: - current_params['MatchCriteria'] = list() - if 'PhysicalConnectionRequirements' not in current_params: - current_params['PhysicalConnectionRequirements'] = dict() - current_params['PhysicalConnectionRequirements']['SecurityGroupIdList'] = [] - current_params['PhysicalConnectionRequirements']['SubnetId'] = "" - - if 'ConnectionProperties' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionProperties'] \ - != current_params['ConnectionProperties']: + if "Description" not in current_params: + current_params["Description"] = "" + if "MatchCriteria" not in current_params: + current_params["MatchCriteria"] = list() + if "PhysicalConnectionRequirements" not in current_params: + current_params["PhysicalConnectionRequirements"] = dict() + current_params["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = [] + current_params["PhysicalConnectionRequirements"]["SubnetId"] = "" + + if ( + "ConnectionProperties" in user_params["ConnectionInput"] + and user_params["ConnectionInput"]["ConnectionProperties"] != current_params["ConnectionProperties"] + ): return True - if 'ConnectionType' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionType'] \ - != current_params['ConnectionType']: + if ( + "ConnectionType" in user_params["ConnectionInput"] + and user_params["ConnectionInput"]["ConnectionType"] != current_params["ConnectionType"] + ): return True - if 'Description' in user_params['ConnectionInput'] and user_params['ConnectionInput']['Description'] != current_params['Description']: + if ( + "Description" in user_params["ConnectionInput"] + and user_params["ConnectionInput"]["Description"] != current_params["Description"] + ): return True - if 'MatchCriteria' in user_params['ConnectionInput'] and set(user_params['ConnectionInput']['MatchCriteria']) != set(current_params['MatchCriteria']): + if "MatchCriteria" in user_params["ConnectionInput"] and set( + user_params["ConnectionInput"]["MatchCriteria"] + ) != set(current_params["MatchCriteria"]): return True - if 'PhysicalConnectionRequirements' in user_params['ConnectionInput']: - if 'SecurityGroupIdList' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ - set(user_params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList']) \ - != set(current_params['PhysicalConnectionRequirements']['SecurityGroupIdList']): + if "PhysicalConnectionRequirements" in user_params["ConnectionInput"]: + if "SecurityGroupIdList" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] and set( + user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"] + ) != set(current_params["PhysicalConnectionRequirements"]["SecurityGroupIdList"]): return True - if 'SubnetId' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ - user_params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] \ - != current_params['PhysicalConnectionRequirements']['SubnetId']: + if ( + "SubnetId" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] + and user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"] + != current_params["PhysicalConnectionRequirements"]["SubnetId"] + ): return True - if 'AvailabilityZone' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ - user_params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] \ - != current_params['PhysicalConnectionRequirements']['AvailabilityZone']: + if ( + "AvailabilityZone" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] + and user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["AvailabilityZone"] + != current_params["PhysicalConnectionRequirements"]["AvailabilityZone"] + ): return True return False @@ -252,11 +265,11 @@ def _await_glue_connection(connection, module): while wait_timeout > time.time(): glue_connection = _get_glue_connection(connection, module) - if glue_connection and glue_connection.get('Name'): + if glue_connection and glue_connection.get("Name"): return glue_connection time.sleep(check_interval) - module.fail_json(msg='Timeout waiting for Glue connection %s' % module.params.get('name')) + module.fail_json(msg="Timeout waiting for Glue connection %s" % module.params.get("name")) def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection): @@ -271,26 +284,30 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co changed = False params = dict() - params['ConnectionInput'] = dict() - params['ConnectionInput']['Name'] = module.params.get("name") - params['ConnectionInput']['ConnectionType'] = module.params.get("connection_type") - params['ConnectionInput']['ConnectionProperties'] = module.params.get("connection_properties") + params["ConnectionInput"] = dict() + params["ConnectionInput"]["Name"] = module.params.get("name") + params["ConnectionInput"]["ConnectionType"] = module.params.get("connection_type") + params["ConnectionInput"]["ConnectionProperties"] = module.params.get("connection_properties") if module.params.get("catalog_id") is not None: - params['CatalogId'] = module.params.get("catalog_id") + params["CatalogId"] = module.params.get("catalog_id") if module.params.get("description") is not None: - params['ConnectionInput']['Description'] = module.params.get("description") + params["ConnectionInput"]["Description"] = module.params.get("description") if module.params.get("match_criteria") is not None: - params['ConnectionInput']['MatchCriteria'] = module.params.get("match_criteria") + params["ConnectionInput"]["MatchCriteria"] = module.params.get("match_criteria") if module.params.get("security_groups") is not None or module.params.get("subnet_id") is not None: - params['ConnectionInput']['PhysicalConnectionRequirements'] = dict() + params["ConnectionInput"]["PhysicalConnectionRequirements"] = dict() if module.params.get("security_groups") is not None: # Get security group IDs from names - security_group_ids = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection_ec2, boto3=True) - params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList'] = security_group_ids + security_group_ids = get_ec2_security_group_ids_from_names( + module.params.get("security_groups"), connection_ec2, boto3=True + ) + params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = security_group_ids if module.params.get("subnet_id") is not None: - params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] = module.params.get("subnet_id") + params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"] = module.params.get("subnet_id") if module.params.get("availability_zone") is not None: - params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] = module.params.get("availability_zone") + params["ConnectionInput"]["PhysicalConnectionRequirements"]["AvailabilityZone"] = module.params.get( + "availability_zone" + ) # If glue_connection is not None then check if it needs to be modified, else create it if glue_connection: @@ -298,7 +315,7 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co try: # We need to slightly modify the params for an update update_params = copy.deepcopy(params) - update_params['Name'] = update_params['ConnectionInput']['Name'] + update_params["Name"] = update_params["ConnectionInput"]["Name"] if not module.check_mode: connection.update_connection(aws_retry=True, **update_params) changed = True @@ -317,12 +334,17 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co glue_connection = _await_glue_connection(connection, module) if glue_connection: - module.deprecate("The 'connection_properties' return key is deprecated and will be replaced" - " by 'raw_connection_properties'. Both values are returned for now.", - date='2024-06-01', collection_name='community.aws') - glue_connection['RawConnectionProperties'] = glue_connection['ConnectionProperties'] + module.deprecate( + "The 'connection_properties' return key is deprecated and will be replaced" + " by 'raw_connection_properties'. Both values are returned for now.", + date="2024-06-01", + collection_name="community.aws", + ) + glue_connection["RawConnectionProperties"] = glue_connection["ConnectionProperties"] - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=['RawConnectionProperties'])) + module.exit_json( + changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=["RawConnectionProperties"]) + ) def delete_glue_connection(connection, module, glue_connection): @@ -336,9 +358,9 @@ def delete_glue_connection(connection, module, glue_connection): """ changed = False - params = {'ConnectionName': module.params.get("name")} + params = {"ConnectionName": module.params.get("name")} if module.params.get("catalog_id") is not None: - params['CatalogId'] = module.params.get("catalog_id") + params["CatalogId"] = module.params.get("catalog_id") if glue_connection: try: @@ -352,41 +374,41 @@ def delete_glue_connection(connection, module, glue_connection): def main(): - - argument_spec = ( - dict( - availability_zone=dict(type='str'), - catalog_id=dict(type='str'), - connection_properties=dict(type='dict'), - connection_type=dict(type='str', default='JDBC', choices=['CUSTOM', 'JDBC', 'KAFKA', 'MARKETPLACE', 'MONGODB', 'NETWORK']), - description=dict(type='str'), - match_criteria=dict(type='list', elements='str'), - name=dict(required=True, type='str'), - security_groups=dict(type='list', elements='str'), - state=dict(required=True, choices=['present', 'absent'], type='str'), - subnet_id=dict(type='str') - ) + argument_spec = dict( + availability_zone=dict(type="str"), + catalog_id=dict(type="str"), + connection_properties=dict(type="dict"), + connection_type=dict( + type="str", default="JDBC", choices=["CUSTOM", "JDBC", "KAFKA", "MARKETPLACE", "MONGODB", "NETWORK"] + ), + description=dict(type="str"), + match_criteria=dict(type="list", elements="str"), + name=dict(required=True, type="str"), + security_groups=dict(type="list", elements="str"), + state=dict(required=True, choices=["present", "absent"], type="str"), + subnet_id=dict(type="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['connection_properties']), - ('connection_type', 'NETWORK', ['availability_zone', 'security_groups', 'subnet_id']) - ], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[ + ("state", "present", ["connection_properties"]), + ("connection_type", "NETWORK", ["availability_zone", "security_groups", "subnet_id"]), + ], + supports_check_mode=True, + ) retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection_glue = module.client('glue', retry_decorator=retry_decorator) - connection_ec2 = module.client('ec2', retry_decorator=retry_decorator) + connection_glue = module.client("glue", retry_decorator=retry_decorator) + connection_ec2 = module.client("ec2", retry_decorator=retry_decorator) glue_connection = _get_glue_connection(connection_glue, module) - if module.params.get("state") == 'present': + if module.params.get("state") == "present": create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection) else: delete_glue_connection(connection_glue, module, glue_connection) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/glue_crawler.py b/plugins/modules/glue_crawler.py index 04c6cd3eb52..0a8598b6c7a 100644 --- a/plugins/modules/glue_crawler.py +++ b/plugins/modules/glue_crawler.py @@ -215,14 +215,17 @@ def _get_glue_crawler(connection, module, glue_crawler_name): - ''' + """ Get an AWS Glue crawler based on name. If not found, return None. - ''' + """ try: - return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)['Crawler'] - except is_boto3_error_code('EntityNotFoundException'): + return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)["Crawler"] + except is_boto3_error_code("EntityNotFoundException"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) @@ -238,39 +241,58 @@ def _trim_target(target): if not target: return None retval = target.copy() - if not retval.get('Exclusions', None): - retval.pop('Exclusions', None) + if not retval.get("Exclusions", None): + retval.pop("Exclusions", None) return retval def _compare_glue_crawler_params(user_params, current_params): - ''' + """ Compare Glue crawler params. If there is a difference, return True immediately else return False - ''' - if 'DatabaseName' in user_params and user_params['DatabaseName'] != current_params['DatabaseName']: + """ + if "DatabaseName" in user_params and user_params["DatabaseName"] != current_params["DatabaseName"]: return True - if 'Description' in user_params and user_params['Description'] != current_params['Description']: + if "Description" in user_params and user_params["Description"] != current_params["Description"]: return True - if 'RecrawlPolicy' in user_params and user_params['RecrawlPolicy'] != current_params['RecrawlPolicy']: + if "RecrawlPolicy" in user_params and user_params["RecrawlPolicy"] != current_params["RecrawlPolicy"]: return True - if 'Role' in user_params and user_params['Role'] != current_params['Role']: + if "Role" in user_params and user_params["Role"] != current_params["Role"]: return True - if 'SchemaChangePolicy' in user_params and user_params['SchemaChangePolicy'] != current_params['SchemaChangePolicy']: + if ( + "SchemaChangePolicy" in user_params + and user_params["SchemaChangePolicy"] != current_params["SchemaChangePolicy"] + ): return True - if 'TablePrefix' in user_params and user_params['TablePrefix'] != current_params['TablePrefix']: + if "TablePrefix" in user_params and user_params["TablePrefix"] != current_params["TablePrefix"]: return True - if 'Targets' in user_params: - if 'S3Targets' in user_params['Targets']: - if _trim_targets(user_params['Targets']['S3Targets']) != _trim_targets(current_params['Targets']['S3Targets']): + if "Targets" in user_params: + if "S3Targets" in user_params["Targets"]: + if _trim_targets(user_params["Targets"]["S3Targets"]) != _trim_targets( + current_params["Targets"]["S3Targets"] + ): return True - if 'JdbcTargets' in user_params['Targets'] and user_params['Targets']['JdbcTargets'] != current_params['Targets']['JdbcTargets']: - if _trim_targets(user_params['Targets']['JdbcTargets']) != _trim_targets(current_params['Targets']['JdbcTargets']): + if ( + "JdbcTargets" in user_params["Targets"] + and user_params["Targets"]["JdbcTargets"] != current_params["Targets"]["JdbcTargets"] + ): + if _trim_targets(user_params["Targets"]["JdbcTargets"]) != _trim_targets( + current_params["Targets"]["JdbcTargets"] + ): return True - if 'MongoDBTargets' in user_params['Targets'] and user_params['Targets']['MongoDBTargets'] != current_params['Targets']['MongoDBTargets']: + if ( + "MongoDBTargets" in user_params["Targets"] + and user_params["Targets"]["MongoDBTargets"] != current_params["Targets"]["MongoDBTargets"] + ): return True - if 'DynamoDBTargets' in user_params['Targets'] and user_params['Targets']['DynamoDBTargets'] != current_params['Targets']['DynamoDBTargets']: + if ( + "DynamoDBTargets" in user_params["Targets"] + and user_params["Targets"]["DynamoDBTargets"] != current_params["Targets"]["DynamoDBTargets"] + ): return True - if 'CatalogTargets' in user_params['Targets'] and user_params['Targets']['CatalogTargets'] != current_params['Targets']['CatalogTargets']: + if ( + "CatalogTargets" in user_params["Targets"] + and user_params["Targets"]["CatalogTargets"] != current_params["Targets"]["CatalogTargets"] + ): return True return False @@ -279,21 +301,23 @@ def _compare_glue_crawler_params(user_params, current_params): def ensure_tags(connection, module, glue_crawler): changed = False - if module.params.get('tags') is None: + if module.params.get("tags") is None: return False account_id, partition = get_aws_account_info(module) - arn = 'arn:{0}:glue:{1}:{2}:crawler/{3}'.format(partition, module.region, account_id, module.params.get('name')) + arn = "arn:{0}:glue:{1}:{2}:crawler/{3}".format(partition, module.region, account_id, module.params.get("name")) try: - existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {}) + existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: if module.check_mode: existing_tags = {} else: - module.fail_json_aws(e, msg='Unable to get tags for Glue crawler %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to get tags for Glue crawler %s" % module.params.get("name")) - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags( + existing_tags, module.params.get("tags"), module.params.get("purge_tags") + ) if tags_to_remove: changed = True @@ -301,7 +325,7 @@ def ensure_tags(connection, module, glue_crawler): try: connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Glue crawler %s" % module.params.get("name")) if tags_to_add: changed = True @@ -309,35 +333,37 @@ def ensure_tags(connection, module, glue_crawler): try: connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Glue crawler %s" % module.params.get("name")) return changed def create_or_update_glue_crawler(connection, module, glue_crawler): - ''' + """ Create or update an AWS Glue crawler - ''' + """ changed = False params = dict() - params['Name'] = module.params.get('name') - params['Role'] = module.params.get('role') - params['Targets'] = module.params.get('targets') - if module.params.get('database_name') is not None: - params['DatabaseName'] = module.params.get('database_name') - if module.params.get('description') is not None: - params['Description'] = module.params.get('description') - if module.params.get('recrawl_policy') is not None: - params['RecrawlPolicy'] = snake_dict_to_camel_dict(module.params.get('recrawl_policy'), capitalize_first=True) - if module.params.get('role') is not None: - params['Role'] = module.params.get('role') - if module.params.get('schema_change_policy') is not None: - params['SchemaChangePolicy'] = snake_dict_to_camel_dict(module.params.get('schema_change_policy'), capitalize_first=True) - if module.params.get('table_prefix') is not None: - params['TablePrefix'] = module.params.get('table_prefix') - if module.params.get('targets') is not None: - params['Targets'] = module.params.get('targets') + params["Name"] = module.params.get("name") + params["Role"] = module.params.get("role") + params["Targets"] = module.params.get("targets") + if module.params.get("database_name") is not None: + params["DatabaseName"] = module.params.get("database_name") + if module.params.get("description") is not None: + params["Description"] = module.params.get("description") + if module.params.get("recrawl_policy") is not None: + params["RecrawlPolicy"] = snake_dict_to_camel_dict(module.params.get("recrawl_policy"), capitalize_first=True) + if module.params.get("role") is not None: + params["Role"] = module.params.get("role") + if module.params.get("schema_change_policy") is not None: + params["SchemaChangePolicy"] = snake_dict_to_camel_dict( + module.params.get("schema_change_policy"), capitalize_first=True + ) + if module.params.get("table_prefix") is not None: + params["TablePrefix"] = module.params.get("table_prefix") + if module.params.get("targets") is not None: + params["Targets"] = module.params.get("targets") if glue_crawler: if _compare_glue_crawler_params(params, glue_crawler): @@ -355,23 +381,26 @@ def create_or_update_glue_crawler(connection, module, glue_crawler): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) - glue_crawler = _get_glue_crawler(connection, module, params['Name']) + glue_crawler = _get_glue_crawler(connection, module, params["Name"]) changed |= ensure_tags(connection, module, glue_crawler) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=['SchemaChangePolicy', 'RecrawlPolicy', 'Targets'])) + module.exit_json( + changed=changed, + **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=["SchemaChangePolicy", "RecrawlPolicy", "Targets"]), + ) def delete_glue_crawler(connection, module, glue_crawler): - ''' + """ Delete an AWS Glue crawler - ''' + """ changed = False if glue_crawler: try: if not module.check_mode: - connection.delete_crawler(aws_retry=True, Name=glue_crawler['Name']) + connection.delete_crawler(aws_retry=True, Name=glue_crawler["Name"]) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -380,46 +409,39 @@ def delete_glue_crawler(connection, module, glue_crawler): def main(): - - argument_spec = ( - dict( - database_name=dict(type='str'), - description=dict(type='str'), - name=dict(required=True, type='str'), - purge_tags=dict(type='bool', default=True), - recrawl_policy=dict(type='dict', options=dict( - recrawl_behavior=dict(type='str') - )), - role=dict(type='str'), - schema_change_policy=dict(type='dict', options=dict( - delete_behavior=dict(type='str'), - update_behavior=dict(type='str') - )), - state=dict(required=True, choices=['present', 'absent'], type='str'), - table_prefix=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - targets=dict(type='dict') - ) + argument_spec = dict( + database_name=dict(type="str"), + description=dict(type="str"), + name=dict(required=True, type="str"), + purge_tags=dict(type="bool", default=True), + recrawl_policy=dict(type="dict", options=dict(recrawl_behavior=dict(type="str"))), + role=dict(type="str"), + schema_change_policy=dict( + type="dict", options=dict(delete_behavior=dict(type="str"), update_behavior=dict(type="str")) + ), + state=dict(required=True, choices=["present", "absent"], type="str"), + table_prefix=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + targets=dict(type="dict"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['role', 'targets']) - ], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["role", "targets"])], + supports_check_mode=True, + ) - connection = module.client('glue', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("glue", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - state = module.params.get('state') + state = module.params.get("state") - glue_crawler = _get_glue_crawler(connection, module, module.params.get('name')) + glue_crawler = _get_glue_crawler(connection, module, module.params.get("name")) - if state == 'present': + if state == "present": create_or_update_glue_crawler(connection, module, glue_crawler) else: delete_glue_crawler(connection, module, glue_crawler) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/glue_job.py b/plugins/modules/glue_job.py index 6e979f28a9d..4740deed3c9 100644 --- a/plugins/modules/glue_job.py +++ b/plugins/modules/glue_job.py @@ -250,10 +250,13 @@ def _get_glue_job(connection, module, glue_job_name): :return: boto3 Glue job dict or None if not found """ try: - return connection.get_job(aws_retry=True, JobName=glue_job_name)['Job'] - except is_boto3_error_code('EntityNotFoundException'): + return connection.get_job(aws_retry=True, JobName=glue_job_name)["Job"] + except is_boto3_error_code("EntityNotFoundException"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) @@ -268,39 +271,43 @@ def _compare_glue_job_params(user_params, current_params): # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description # To counter this, add the key if it's missing with a blank value - if 'Description' not in current_params: - current_params['Description'] = "" - if 'DefaultArguments' not in current_params: - current_params['DefaultArguments'] = dict() + if "Description" not in current_params: + current_params["Description"] = "" + if "DefaultArguments" not in current_params: + current_params["DefaultArguments"] = dict() - if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']: + if "AllocatedCapacity" in user_params and user_params["AllocatedCapacity"] != current_params["AllocatedCapacity"]: return True - if 'Command' in user_params: - if user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']: + if "Command" in user_params: + if user_params["Command"]["ScriptLocation"] != current_params["Command"]["ScriptLocation"]: return True - if user_params['Command']['PythonVersion'] != current_params['Command']['PythonVersion']: + if user_params["Command"]["PythonVersion"] != current_params["Command"]["PythonVersion"]: return True - if 'Connections' in user_params and user_params['Connections'] != current_params['Connections']: + if "Connections" in user_params and user_params["Connections"] != current_params["Connections"]: return True - if 'DefaultArguments' in user_params and user_params['DefaultArguments'] != current_params['DefaultArguments']: + if "DefaultArguments" in user_params and user_params["DefaultArguments"] != current_params["DefaultArguments"]: return True - if 'Description' in user_params and user_params['Description'] != current_params['Description']: + if "Description" in user_params and user_params["Description"] != current_params["Description"]: return True - if 'ExecutionProperty' in user_params and user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']: + if ( + "ExecutionProperty" in user_params + and user_params["ExecutionProperty"]["MaxConcurrentRuns"] + != current_params["ExecutionProperty"]["MaxConcurrentRuns"] + ): return True - if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']: + if "GlueVersion" in user_params and user_params["GlueVersion"] != current_params["GlueVersion"]: return True - if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']: + if "MaxRetries" in user_params and user_params["MaxRetries"] != current_params["MaxRetries"]: return True - if 'Role' in user_params and user_params['Role'] != current_params['Role']: + if "Role" in user_params and user_params["Role"] != current_params["Role"]: return True - if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']: + if "Timeout" in user_params and user_params["Timeout"] != current_params["Timeout"]: return True - if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']: + if "GlueVersion" in user_params and user_params["GlueVersion"] != current_params["GlueVersion"]: return True - if 'WorkerType' in user_params and user_params['WorkerType'] != current_params['WorkerType']: + if "WorkerType" in user_params and user_params["WorkerType"] != current_params["WorkerType"]: return True - if 'NumberOfWorkers' in user_params and user_params['NumberOfWorkers'] != current_params['NumberOfWorkers']: + if "NumberOfWorkers" in user_params and user_params["NumberOfWorkers"] != current_params["NumberOfWorkers"]: return True return False @@ -309,21 +316,23 @@ def _compare_glue_job_params(user_params, current_params): def ensure_tags(connection, module, glue_job): changed = False - if module.params.get('tags') is None: + if module.params.get("tags") is None: return False account_id, partition = get_aws_account_info(module) - arn = 'arn:{0}:glue:{1}:{2}:job/{3}'.format(partition, module.region, account_id, module.params.get('name')) + arn = "arn:{0}:glue:{1}:{2}:job/{3}".format(partition, module.region, account_id, module.params.get("name")) try: - existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {}) + existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: if module.check_mode: existing_tags = {} else: - module.fail_json_aws(e, msg='Unable to get tags for Glue job %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to get tags for Glue job %s" % module.params.get("name")) - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags( + existing_tags, module.params.get("tags"), module.params.get("purge_tags") + ) if tags_to_remove: changed = True @@ -331,7 +340,7 @@ def ensure_tags(connection, module, glue_job): try: connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Glue job %s" % module.params.get("name")) if tags_to_add: changed = True @@ -339,7 +348,7 @@ def ensure_tags(connection, module, glue_job): try: connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Glue job %s" % module.params.get("name")) return changed @@ -356,42 +365,45 @@ def create_or_update_glue_job(connection, module, glue_job): changed = False params = dict() - params['Name'] = module.params.get("name") - params['Role'] = module.params.get("role") + params["Name"] = module.params.get("name") + params["Role"] = module.params.get("role") if module.params.get("allocated_capacity") is not None: - params['AllocatedCapacity'] = module.params.get("allocated_capacity") + params["AllocatedCapacity"] = module.params.get("allocated_capacity") if module.params.get("command_script_location") is not None: - params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")} + params["Command"] = { + "Name": module.params.get("command_name"), + "ScriptLocation": module.params.get("command_script_location"), + } if module.params.get("command_python_version") is not None: - params['Command']['PythonVersion'] = module.params.get("command_python_version") + params["Command"]["PythonVersion"] = module.params.get("command_python_version") if module.params.get("connections") is not None: - params['Connections'] = {'Connections': module.params.get("connections")} + params["Connections"] = {"Connections": module.params.get("connections")} if module.params.get("default_arguments") is not None: - params['DefaultArguments'] = module.params.get("default_arguments") + params["DefaultArguments"] = module.params.get("default_arguments") if module.params.get("description") is not None: - params['Description'] = module.params.get("description") + params["Description"] = module.params.get("description") if module.params.get("glue_version") is not None: - params['GlueVersion'] = module.params.get("glue_version") + params["GlueVersion"] = module.params.get("glue_version") if module.params.get("max_concurrent_runs") is not None: - params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")} + params["ExecutionProperty"] = {"MaxConcurrentRuns": module.params.get("max_concurrent_runs")} if module.params.get("max_retries") is not None: - params['MaxRetries'] = module.params.get("max_retries") + params["MaxRetries"] = module.params.get("max_retries") if module.params.get("timeout") is not None: - params['Timeout'] = module.params.get("timeout") + params["Timeout"] = module.params.get("timeout") if module.params.get("glue_version") is not None: - params['GlueVersion'] = module.params.get("glue_version") + params["GlueVersion"] = module.params.get("glue_version") if module.params.get("worker_type") is not None: - params['WorkerType'] = module.params.get("worker_type") + params["WorkerType"] = module.params.get("worker_type") if module.params.get("number_of_workers") is not None: - params['NumberOfWorkers'] = module.params.get("number_of_workers") + params["NumberOfWorkers"] = module.params.get("number_of_workers") # If glue_job is not None then check if it needs to be modified, else create it if glue_job: if _compare_glue_job_params(params, glue_job): try: # Update job needs slightly modified params - update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)} - del update_params['JobUpdate']['Name'] + update_params = {"JobName": params["Name"], "JobUpdate": copy.deepcopy(params)} + del update_params["JobUpdate"]["Name"] if not module.check_mode: connection.update_job(aws_retry=True, **update_params) changed = True @@ -405,11 +417,11 @@ def create_or_update_glue_job(connection, module, glue_job): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) - glue_job = _get_glue_job(connection, module, params['Name']) + glue_job = _get_glue_job(connection, module, params["Name"]) changed |= ensure_tags(connection, module, glue_job) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=['DefaultArguments'])) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=["DefaultArguments"])) def delete_glue_job(connection, module, glue_job): @@ -426,7 +438,7 @@ def delete_glue_job(connection, module, glue_job): if glue_job: try: if not module.check_mode: - connection.delete_job(aws_retry=True, JobName=glue_job['Name']) + connection.delete_job(aws_retry=True, JobName=glue_job["Name"]) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -435,49 +447,45 @@ def delete_glue_job(connection, module, glue_job): def main(): - - argument_spec = ( - dict( - allocated_capacity=dict(type='int'), - command_name=dict(type='str', default='glueetl'), - command_python_version=dict(type='str'), - command_script_location=dict(type='str'), - connections=dict(type='list', elements='str'), - default_arguments=dict(type='dict'), - description=dict(type='str'), - glue_version=dict(type='str'), - max_concurrent_runs=dict(type='int'), - max_retries=dict(type='int'), - name=dict(required=True, type='str'), - number_of_workers=dict(type='int'), - purge_tags=dict(type='bool', default=True), - role=dict(type='str'), - state=dict(required=True, choices=['present', 'absent'], type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - timeout=dict(type='int'), - worker_type=dict(choices=['Standard', 'G.1X', 'G.2X'], type='str'), - ) + argument_spec = dict( + allocated_capacity=dict(type="int"), + command_name=dict(type="str", default="glueetl"), + command_python_version=dict(type="str"), + command_script_location=dict(type="str"), + connections=dict(type="list", elements="str"), + default_arguments=dict(type="dict"), + description=dict(type="str"), + glue_version=dict(type="str"), + max_concurrent_runs=dict(type="int"), + max_retries=dict(type="int"), + name=dict(required=True, type="str"), + number_of_workers=dict(type="int"), + purge_tags=dict(type="bool", default=True), + role=dict(type="str"), + state=dict(required=True, choices=["present", "absent"], type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + timeout=dict(type="int"), + worker_type=dict(choices=["Standard", "G.1X", "G.2X"], type="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['role', 'command_script_location']) - ], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["role", "command_script_location"])], + supports_check_mode=True, + ) retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection = module.client('glue', retry_decorator=retry_decorator) + connection = module.client("glue", retry_decorator=retry_decorator) state = module.params.get("state") glue_job = _get_glue_job(connection, module, module.params.get("name")) - if state == 'present': + if state == "present": create_or_update_glue_job(connection, module, glue_job) else: delete_glue_job(connection, module, glue_job) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_access_key.py b/plugins/modules/iam_access_key.py index af472fbe8c6..a8f03d7bced 100644 --- a/plugins/modules/iam_access_key.py +++ b/plugins/modules/iam_access_key.py @@ -149,14 +149,15 @@ def delete_access_key(access_keys, user, access_key_id): UserName=user, AccessKeyId=access_key_id, ) - except is_boto3_error_code('NoSuchEntityException'): + except is_boto3_error_code("NoSuchEntityException"): # Generally occurs when race conditions have happened and someone # deleted the key while we were checking to see if it existed. return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, msg='Failed to delete access key "{0}" for user "{1}"'.format(access_key_id, user) - ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to delete access key "{0}" for user "{1}"'.format(access_key_id, user)) return True @@ -171,9 +172,9 @@ def update_access_key(access_keys, user, access_key_id, enabled): access_key = access_keys.get(access_key_id) if enabled is not None: - desired_status = 'Active' if enabled else 'Inactive' - if access_key.get('status') != desired_status: - changes['Status'] = desired_status + desired_status = "Active" if enabled else "Inactive" + if access_key.get("status") != desired_status: + changes["Status"] = desired_status if not changes: return False @@ -182,15 +183,11 @@ def update_access_key(access_keys, user, access_key_id, enabled): return True try: - client.update_access_key( - aws_retry=True, - UserName=user, - AccessKeyId=access_key_id, - **changes - ) + client.update_access_key(aws_retry=True, UserName=user, AccessKeyId=access_key_id, **changes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( - e, changes=changes, + e, + changes=changes, msg='Failed to update access key "{0}" for user "{1}"'.format(access_key_id, user), ) return True @@ -201,7 +198,7 @@ def create_access_key(access_keys, user, rotate_keys, enabled): oldest_key = False if len(access_keys) > 1 and rotate_keys: - sorted_keys = sorted(list(access_keys), key=lambda k: access_keys[k].get('create_date', None)) + sorted_keys = sorted(list(access_keys), key=lambda k: access_keys[k].get("create_date", None)) oldest_key = sorted_keys[0] changed |= delete_access_key(access_keys, user, oldest_key) @@ -215,18 +212,18 @@ def create_access_key(access_keys, user, rotate_keys, enabled): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to create access key for user "{0}"'.format(user)) results = camel_dict_to_snake_dict(results) - access_key = results.get('access_key') + access_key = results.get("access_key") access_key = normalize_boto3_result(access_key) # Update settings which can't be managed on creation if enabled is False: - access_key_id = access_key['access_key_id'] + access_key_id = access_key["access_key_id"] access_keys = {access_key_id: access_key} update_access_key(access_keys, user, access_key_id, enabled) - access_key['status'] = 'Inactive' + access_key["status"] = "Inactive" if oldest_key: - access_key['deleted_access_key'] = oldest_key + access_key["deleted_access_key"] = oldest_key return access_key @@ -235,67 +232,64 @@ def get_access_keys(user): try: results = client.list_access_keys(aws_retry=True, UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg='Failed to get access keys for user "{0}"'.format(user) - ) + module.fail_json_aws(e, msg='Failed to get access keys for user "{0}"'.format(user)) if not results: return None results = camel_dict_to_snake_dict(results) - access_keys = results.get('access_key_metadata', []) + access_keys = results.get("access_key_metadata", []) if not access_keys: return [] access_keys = normalize_boto3_result(access_keys) - access_keys = {k['access_key_id']: k for k in access_keys} + access_keys = {k["access_key_id"]: k for k in access_keys} return access_keys def main(): - global module global client argument_spec = dict( - user_name=dict(required=True, type='str', aliases=['username']), - id=dict(required=False, type='str'), - state=dict(required=False, choices=['present', 'absent'], default='present'), - active=dict(required=False, type='bool', aliases=['enabled']), - rotate_keys=dict(required=False, type='bool', default=False), + user_name=dict(required=True, type="str", aliases=["username"]), + id=dict(required=False, type="str"), + state=dict(required=False, choices=["present", "absent"], default="present"), + active=dict(required=False, type="bool", aliases=["enabled"]), + rotate_keys=dict(required=False, type="bool", default=False), ) required_if = [ - ['state', 'absent', ('id')], + ["state", "absent", ("id")], ] mutually_exclusive = [ - ['rotate_keys', 'id'], + ["rotate_keys", "id"], ] module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) changed = False - state = module.params.get('state') - user = module.params.get('user_name') - access_key_id = module.params.get('id') - rotate_keys = module.params.get('rotate_keys') - enabled = module.params.get('active') + state = module.params.get("state") + user = module.params.get("user_name") + access_key_id = module.params.get("id") + rotate_keys = module.params.get("rotate_keys") + enabled = module.params.get("active") access_keys = get_access_keys(user) results = dict() - if state == 'absent': + if state == "absent": changed |= delete_access_key(access_keys, user, access_key_id) else: # If we have an ID then we should try to update it if access_key_id: changed |= update_access_key(access_keys, user, access_key_id, enabled) access_keys = get_access_keys(user) - results['access_key'] = access_keys.get(access_key_id, None) + results["access_key"] = access_keys.get(access_key_id, None) # Otherwise we try to create a new one else: secret_key = create_access_key(access_keys, user, rotate_keys, enabled) @@ -303,15 +297,15 @@ def main(): changed |= secret_key else: changed = True - results['access_key_id'] = secret_key.get('access_key_id', None) - results['secret_access_key'] = secret_key.pop('secret_access_key', None) - results['deleted_access_key_id'] = secret_key.pop('deleted_access_key', None) + results["access_key_id"] = secret_key.get("access_key_id", None) + results["secret_access_key"] = secret_key.pop("secret_access_key", None) + results["deleted_access_key_id"] = secret_key.pop("deleted_access_key", None) if secret_key: - results['access_key'] = secret_key + results["access_key"] = secret_key results = scrub_none_parameters(results) module.exit_json(changed=changed, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_access_key_info.py b/plugins/modules/iam_access_key_info.py index 6573e657a18..22bbd564cb0 100644 --- a/plugins/modules/iam_access_key_info.py +++ b/plugins/modules/iam_access_key_info.py @@ -85,44 +85,38 @@ def get_access_keys(user): try: results = client.list_access_keys(aws_retry=True, UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg='Failed to get access keys for user "{0}"'.format(user) - ) + module.fail_json_aws(e, msg='Failed to get access keys for user "{0}"'.format(user)) if not results: return None results = camel_dict_to_snake_dict(results) - access_keys = results.get('access_key_metadata', []) + access_keys = results.get("access_key_metadata", []) if not access_keys: return [] access_keys = normalize_boto3_result(access_keys) - access_keys = sorted(access_keys, key=lambda d: d.get('create_date', None)) + access_keys = sorted(access_keys, key=lambda d: d.get("create_date", None)) return access_keys def main(): - global module global client argument_spec = dict( - user_name=dict(required=True, type='str', aliases=['username']), + user_name=dict(required=True, type="str", aliases=["username"]), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) changed = False - user = module.params.get('user_name') + user = module.params.get("user_name") access_keys = get_access_keys(user) module.exit_json(changed=changed, access_keys=access_keys) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_group.py b/plugins/modules/iam_group.py index 9dc43ec0a94..357671dbdc6 100644 --- a/plugins/modules/iam_group.py +++ b/plugins/modules/iam_group.py @@ -178,14 +178,13 @@ def compare_attached_group_policies(current_attached_policies, new_attached_policies): - # If new_attached_policies is None it means we want to remove all policies if len(current_attached_policies) > 0 and new_attached_policies is None: return False current_attached_policies_arn_list = [] for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) + current_attached_policies_arn_list.append(policy["PolicyArn"]) if set(current_attached_policies_arn_list) == set(new_attached_policies): return True @@ -194,7 +193,6 @@ def compare_attached_group_policies(current_attached_policies, new_attached_poli def compare_group_members(current_group_members, new_group_members): - # If new_attached_policies is None it means we want to remove all policies if len(current_group_members) > 0 and new_group_members is None: return False @@ -205,16 +203,15 @@ def compare_group_members(current_group_members, new_group_members): def convert_friendly_names_to_arns(connection, module, policy_names): - - if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None): + if not any(not policy.startswith("arn:") for policy in policy_names if policy is not None): return policy_names allpolicies = {} - paginator = connection.get_paginator('list_policies') - policies = paginator.paginate().build_full_result()['Policies'] + paginator = connection.get_paginator("list_policies") + policies = paginator.paginate().build_full_result()["Policies"] for policy in policies: - allpolicies[policy['PolicyName']] = policy['Arn'] - allpolicies[policy['Arn']] = policy['Arn'] + allpolicies[policy["PolicyName"]] = policy["Arn"] + allpolicies[policy["Arn"]] = policy["Arn"] try: return [allpolicies[policy] for policy in policy_names] except KeyError as e: @@ -222,20 +219,19 @@ def convert_friendly_names_to_arns(connection, module, policy_names): def create_or_update_group(connection, module): - params = dict() - params['GroupName'] = module.params.get('name') - managed_policies = module.params.get('managed_policies') - users = module.params.get('users') - purge_users = module.params.get('purge_users') - purge_policies = module.params.get('purge_policies') + params["GroupName"] = module.params.get("name") + managed_policies = module.params.get("managed_policies") + users = module.params.get("users") + purge_users = module.params.get("purge_users") + purge_policies = module.params.get("purge_policies") changed = False if managed_policies: managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) # Get group try: - group = get_group(connection, module, params['GroupName']) + group = get_group(connection, module, params["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get group") @@ -252,11 +248,11 @@ def create_or_update_group(connection, module): module.fail_json_aws(e, msg="Couldn't create group") # Manage managed policies - current_attached_policies = get_attached_policy_list(connection, module, params['GroupName']) + current_attached_policies = get_attached_policy_list(connection, module, params["GroupName"]) if not compare_attached_group_policies(current_attached_policies, managed_policies): current_attached_policies_arn_list = [] for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) + current_attached_policies_arn_list.append(policy["PolicyArn"]) # If managed_policies has a single empty element we want to remove all attached policies if purge_policies: @@ -265,9 +261,9 @@ def create_or_update_group(connection, module): changed = True if not module.check_mode: try: - connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn) + connection.detach_group_policy(GroupName=params["GroupName"], PolicyArn=policy_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params['GroupName']) + module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params["GroupName"]) # If there are policies to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above if set(managed_policies) - set(current_attached_policies_arn_list): @@ -276,22 +272,21 @@ def create_or_update_group(connection, module): if managed_policies != [None] and not module.check_mode: for policy_arn in managed_policies: try: - connection.attach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn) + connection.attach_group_policy(GroupName=params["GroupName"], PolicyArn=policy_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params['GroupName']) + module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params["GroupName"]) # Manage group memberships try: - current_group_members = get_group(connection, module, params['GroupName'])['Users'] + current_group_members = get_group(connection, module, params["GroupName"])["Users"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) current_group_members_list = [] for member in current_group_members: - current_group_members_list.append(member['UserName']) + current_group_members_list.append(member["UserName"]) if not compare_group_members(current_group_members_list, users): - if purge_users: for user in list(set(current_group_members_list) - set(users)): # Ensure we mark things have changed if any user gets purged @@ -299,9 +294,11 @@ def create_or_update_group(connection, module): # Skip actions for check mode if not module.check_mode: try: - connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user) + connection.remove_user_from_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove user %s from group %s" % (user, params['GroupName'])) + module.fail_json_aws( + e, msg="Couldn't remove user %s from group %s" % (user, params["GroupName"]) + ) # If there are users to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above if set(users) - set(current_group_members_list): @@ -310,30 +307,29 @@ def create_or_update_group(connection, module): if users != [None] and not module.check_mode: for user in users: try: - connection.add_user_to_group(GroupName=params['GroupName'], UserName=user) + connection.add_user_to_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params['GroupName'])) + module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params["GroupName"])) if module.check_mode: module.exit_json(changed=changed) # Get the group again try: - group = get_group(connection, module, params['GroupName']) + group = get_group(connection, module, params["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group)) def destroy_group(connection, module): - params = dict() - params['GroupName'] = module.params.get('name') + params["GroupName"] = module.params.get("name") try: - group = get_group(connection, module, params['GroupName']) + group = get_group(connection, module, params["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) if group: # Check mode means we would remove this group if module.check_mode: @@ -341,29 +337,29 @@ def destroy_group(connection, module): # Remove any attached policies otherwise deletion fails try: - for policy in get_attached_policy_list(connection, module, params['GroupName']): - connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy['PolicyArn']) + for policy in get_attached_policy_list(connection, module, params["GroupName"]): + connection.detach_group_policy(GroupName=params["GroupName"], PolicyArn=policy["PolicyArn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params['GroupName']) + module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params["GroupName"]) # Remove any users in the group otherwise deletion fails current_group_members_list = [] try: - current_group_members = get_group(connection, module, params['GroupName'])['Users'] + current_group_members = get_group(connection, module, params["GroupName"])["Users"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) for member in current_group_members: - current_group_members_list.append(member['UserName']) + current_group_members_list.append(member["UserName"]) for user in current_group_members_list: try: - connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user) + connection.remove_user_from_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params['GroupName'])) + module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params["GroupName"])) try: connection.delete_group(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't delete group %s" % params['GroupName']) + module.fail_json_aws(e, "Couldn't delete group %s" % params["GroupName"]) else: module.exit_json(changed=False) @@ -374,47 +370,45 @@ def destroy_group(connection, module): @AWSRetry.exponential_backoff() def get_group(connection, module, name): try: - paginator = connection.get_paginator('get_group') + paginator = connection.get_paginator("get_group") return paginator.paginate(GroupName=name).build_full_result() - except is_boto3_error_code('NoSuchEntity'): + except is_boto3_error_code("NoSuchEntity"): return None @AWSRetry.exponential_backoff() def get_attached_policy_list(connection, module, name): - try: - paginator = connection.get_paginator('list_attached_group_policies') - return paginator.paginate(GroupName=name).build_full_result()['AttachedPolicies'] - except is_boto3_error_code('NoSuchEntity'): + paginator = connection.get_paginator("list_attached_group_policies") + return paginator.paginate(GroupName=name).build_full_result()["AttachedPolicies"] + except is_boto3_error_code("NoSuchEntity"): return None def main(): - argument_spec = dict( name=dict(required=True), - managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), - users=dict(default=[], type='list', elements='str'), - state=dict(choices=['present', 'absent'], required=True), - purge_users=dict(default=False, type='bool'), - purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']) + managed_policies=dict(default=[], type="list", aliases=["managed_policy"], elements="str"), + users=dict(default=[], type="list", elements="str"), + state=dict(choices=["present", "absent"], required=True), + purge_users=dict(default=False, type="bool"), + purge_policies=dict(default=False, type="bool", aliases=["purge_policy", "purge_managed_policies"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) - connection = module.client('iam') + connection = module.client("iam") state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_group(connection, module) else: destroy_group(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_managed_policy.py b/plugins/modules/iam_managed_policy.py index f590fcf9d64..0f6189ca454 100644 --- a/plugins/modules/iam_managed_policy.py +++ b/plugins/modules/iam_managed_policy.py @@ -141,8 +141,8 @@ @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_policies_with_backoff(): - paginator = client.get_paginator('list_policies') - return paginator.paginate(Scope='Local').build_full_result() + paginator = client.get_paginator("list_policies") + return paginator.paginate(Scope="Local").build_full_result() def get_policy_by_name(name): @@ -150,22 +150,23 @@ def get_policy_by_name(name): response = list_policies_with_backoff() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policies") - for policy in response['Policies']: - if policy['PolicyName'] == name: + for policy in response["Policies"]: + if policy["PolicyName"] == name: return policy return None def delete_oldest_non_default_version(policy): try: - versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] - if not v['IsDefaultVersion']] + versions = [ + v for v in client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"] if not v["IsDefaultVersion"] + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") - versions.sort(key=lambda v: v['CreateDate'], reverse=True) + versions.sort(key=lambda v: v["CreateDate"], reverse=True) for v in versions[-1:]: try: - client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) + client.delete_policy_version(PolicyArn=policy["Arn"], VersionId=v["VersionId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete policy version") @@ -173,15 +174,17 @@ def delete_oldest_non_default_version(policy): # This needs to return policy_version, changed def get_or_create_policy_version(policy, policy_document): try: - versions = client.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] + versions = client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: try: - document = client.get_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])['PolicyVersion']['Document'] + document = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=v["VersionId"])["PolicyVersion"][ + "Document" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v['VersionId'])) + module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v["VersionId"])) if module.check_mode and compare_policies(document, json.loads(to_native(policy_document))): return v, True @@ -197,23 +200,28 @@ def get_or_create_policy_version(policy, policy_document): # and if that doesn't work, delete the oldest non default policy version # and try again. try: - version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + version = client.create_policy_version(PolicyArn=policy["Arn"], PolicyDocument=policy_document)["PolicyVersion"] return version, True - except is_boto3_error_code('LimitExceeded'): + except is_boto3_error_code("LimitExceeded"): delete_oldest_non_default_version(policy) try: - version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + version = client.create_policy_version(PolicyArn=policy["Arn"], PolicyDocument=policy_document)[ + "PolicyVersion" + ] return version, True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e: module.fail_json_aws(second_e, msg="Couldn't create policy version") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't create policy version") def set_if_default(policy, policy_version, is_default): - if is_default and not policy_version['IsDefaultVersion']: + if is_default and not policy_version["IsDefaultVersion"]: try: - client.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId']) + client.set_default_policy_version(PolicyArn=policy["Arn"], VersionId=policy_version["VersionId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't set default policy version") return True @@ -223,13 +231,14 @@ def set_if_default(policy, policy_version, is_default): def set_if_only(policy, policy_version, is_only): if is_only: try: - versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])[ - 'Versions'] if not v['IsDefaultVersion']] + versions = [ + v for v in client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"] if not v["IsDefaultVersion"] + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: try: - client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) + client.delete_policy_version(PolicyArn=policy["Arn"], VersionId=v["VersionId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete policy version") return len(versions) > 0 @@ -238,39 +247,39 @@ def set_if_only(policy, policy_version, is_only): def detach_all_entities(policy, **kwargs): try: - entities = client.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs) + entities = client.list_entities_for_policy(PolicyArn=policy["Arn"], **kwargs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach list entities for policy {0}".format(policy['PolicyName'])) + module.fail_json_aws(e, msg="Couldn't detach list entities for policy {0}".format(policy["PolicyName"])) - for g in entities['PolicyGroups']: + for g in entities["PolicyGroups"]: try: - client.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName']) + client.detach_group_policy(PolicyArn=policy["Arn"], GroupName=g["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g['GroupName'])) - for u in entities['PolicyUsers']: + module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g["GroupName"])) + for u in entities["PolicyUsers"]: try: - client.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName']) + client.detach_user_policy(PolicyArn=policy["Arn"], UserName=u["UserName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u['UserName'])) - for r in entities['PolicyRoles']: + module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u["UserName"])) + for r in entities["PolicyRoles"]: try: - client.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName']) + client.detach_role_policy(PolicyArn=policy["Arn"], RoleName=r["RoleName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r['RoleName'])) - if entities['IsTruncated']: - detach_all_entities(policy, marker=entities['Marker']) + module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r["RoleName"])) + if entities["IsTruncated"]: + detach_all_entities(policy, marker=entities["Marker"]) def create_or_update_policy(existing_policy): - name = module.params.get('policy_name') - description = module.params.get('policy_description') - default = module.params.get('make_default') - only = module.params.get('only_version') + name = module.params.get("policy_name") + description = module.params.get("policy_description") + default = module.params.get("make_default") + only = module.params.get("only_version") policy = None - if module.params.get('policy') is not None: - policy = json.dumps(json.loads(module.params.get('policy'))) + if module.params.get("policy") is not None: + policy = json.dumps(json.loads(module.params.get("policy"))) if existing_policy is None: if module.check_mode: @@ -278,11 +287,11 @@ def create_or_update_policy(existing_policy): # Create policy when none already exists try: - rvalue = client.create_policy(PolicyName=name, Path='/', PolicyDocument=policy, Description=description) + rvalue = client.create_policy(PolicyName=name, Path="/", PolicyDocument=policy, Description=description) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name)) - module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy'])) + module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue["Policy"])) else: policy_version, changed = get_or_create_policy_version(existing_policy, policy) changed = set_if_default(existing_policy, policy_version, default) or changed @@ -291,7 +300,7 @@ def create_or_update_policy(existing_policy): # If anything has changed we need to refresh the policy if changed: try: - updated_policy = client.get_policy(PolicyArn=existing_policy['Arn'])['Policy'] + updated_policy = client.get_policy(PolicyArn=existing_policy["Arn"])["Policy"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Couldn't get policy") @@ -310,21 +319,20 @@ def delete_policy(existing_policy): detach_all_entities(existing_policy) # Delete Versions try: - versions = client.list_policy_versions(PolicyArn=existing_policy['Arn'])['Versions'] + versions = client.list_policy_versions(PolicyArn=existing_policy["Arn"])["Versions"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: - if not v['IsDefaultVersion']: + if not v["IsDefaultVersion"]: try: - client.delete_policy_version(PolicyArn=existing_policy['Arn'], VersionId=v['VersionId']) + client.delete_policy_version(PolicyArn=existing_policy["Arn"], VersionId=v["VersionId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't delete policy version {0}".format(v['VersionId'])) + module.fail_json_aws(e, msg="Couldn't delete policy version {0}".format(v["VersionId"])) # Delete policy try: - client.delete_policy(PolicyArn=existing_policy['Arn']) + client.delete_policy(PolicyArn=existing_policy["Arn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy['PolicyName'])) + module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy["PolicyName"])) # This is the one case where we will return the old policy module.exit_json(changed=True, policy=camel_dict_to_snake_dict(existing_policy)) @@ -338,34 +346,34 @@ def main(): argument_spec = dict( policy_name=dict(required=True), - policy_description=dict(default=''), - policy=dict(type='json'), - make_default=dict(type='bool', default=True), - only_version=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), + policy_description=dict(default=""), + policy=dict(type="json"), + make_default=dict(type="bool", default=True), + only_version=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['policy']]], - supports_check_mode=True + required_if=[["state", "present", ["policy"]]], + supports_check_mode=True, ) - name = module.params.get('policy_name') - state = module.params.get('state') + name = module.params.get("policy_name") + state = module.params.get("state") try: - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") existing_policy = get_policy_by_name(name) - if state == 'present': + if state == "present": create_or_update_policy(existing_policy) else: delete_policy(existing_policy) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_mfa_device_info.py b/plugins/modules/iam_mfa_device_info.py index c0c434a9be9..44b38ab90f4 100644 --- a/plugins/modules/iam_mfa_device_info.py +++ b/plugins/modules/iam_mfa_device_info.py @@ -67,12 +67,12 @@ def list_mfa_devices(connection, module): - user_name = module.params.get('user_name') + user_name = module.params.get("user_name") changed = False args = {} if user_name is not None: - args['UserName'] = user_name + args["UserName"] = user_name try: response = connection.list_mfa_devices(**args) except ClientError as e: @@ -92,12 +92,12 @@ def main(): ) try: - connection = module.client('iam') + connection = module.client("iam") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_mfa_devices(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_password_policy.py b/plugins/modules/iam_password_policy.py index a980511c2fa..7c93da4139f 100644 --- a/plugins/modules/iam_password_policy.py +++ b/plugins/modules/iam_password_policy.py @@ -109,16 +109,23 @@ class IAMConnection(object): def __init__(self, module): try: - self.connection = module.resource('iam') + self.connection = module.resource("iam") self.module = module except Exception as e: module.fail_json(msg="Failed to connect to AWS: %s" % str(e)) def policy_to_dict(self, policy): policy_attributes = [ - 'allow_users_to_change_password', 'expire_passwords', 'hard_expiry', - 'max_password_age', 'minimum_password_length', 'password_reuse_prevention', - 'require_lowercase_characters', 'require_numbers', 'require_symbols', 'require_uppercase_characters' + "allow_users_to_change_password", + "expire_passwords", + "hard_expiry", + "max_password_age", + "minimum_password_length", + "password_reuse_prevention", + "require_lowercase_characters", + "require_numbers", + "require_symbols", + "require_uppercase_characters", ] ret = {} for attr in policy_attributes: @@ -126,15 +133,15 @@ def policy_to_dict(self, policy): return ret def update_password_policy(self, module, policy): - min_pw_length = module.params.get('min_pw_length') - require_symbols = module.params.get('require_symbols') - require_numbers = module.params.get('require_numbers') - require_uppercase = module.params.get('require_uppercase') - require_lowercase = module.params.get('require_lowercase') - allow_pw_change = module.params.get('allow_pw_change') - pw_max_age = module.params.get('pw_max_age') - pw_reuse_prevent = module.params.get('pw_reuse_prevent') - pw_expire = module.params.get('pw_expire') + min_pw_length = module.params.get("min_pw_length") + require_symbols = module.params.get("require_symbols") + require_numbers = module.params.get("require_numbers") + require_uppercase = module.params.get("require_uppercase") + require_lowercase = module.params.get("require_lowercase") + allow_pw_change = module.params.get("allow_pw_change") + pw_max_age = module.params.get("pw_max_age") + pw_reuse_prevent = module.params.get("pw_reuse_prevent") + pw_expire = module.params.get("pw_expire") update_parameters = dict( MinimumPasswordLength=min_pw_length, @@ -143,7 +150,7 @@ def update_password_policy(self, module, policy): RequireUppercaseCharacters=require_uppercase, RequireLowercaseCharacters=require_lowercase, AllowUsersToChangePassword=allow_pw_change, - HardExpiry=pw_expire + HardExpiry=pw_expire, ) if pw_reuse_prevent: update_parameters.update(PasswordReusePrevention=pw_reuse_prevent) @@ -162,15 +169,18 @@ def update_password_policy(self, module, policy): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy") - changed = (original_policy != updated_policy) + changed = original_policy != updated_policy return (changed, updated_policy, camel_dict_to_snake_dict(results)) def delete_password_policy(self, policy): try: results = policy.delete() - except is_boto3_error_code('NoSuchEntity'): - self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("NoSuchEntity"): + self.module.exit_json(changed=False, task_status={"IAM": "Couldn't find IAM Password Policy"}) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy") return camel_dict_to_snake_dict(results) @@ -178,16 +188,16 @@ def delete_password_policy(self, policy): def main(): module = AnsibleAWSModule( argument_spec={ - 'state': dict(choices=['present', 'absent'], required=True), - 'min_pw_length': dict(type='int', aliases=['minimum_password_length'], default=6), - 'require_symbols': dict(type='bool', default=False), - 'require_numbers': dict(type='bool', default=False), - 'require_uppercase': dict(type='bool', default=False), - 'require_lowercase': dict(type='bool', default=False), - 'allow_pw_change': dict(type='bool', aliases=['allow_password_change'], default=False), - 'pw_max_age': dict(type='int', aliases=['password_max_age'], default=0), - 'pw_reuse_prevent': dict(type='int', aliases=['password_reuse_prevent', 'prevent_reuse'], default=0), - 'pw_expire': dict(type='bool', aliases=['password_expire', 'expire'], default=False), + "state": dict(choices=["present", "absent"], required=True), + "min_pw_length": dict(type="int", aliases=["minimum_password_length"], default=6), + "require_symbols": dict(type="bool", default=False), + "require_numbers": dict(type="bool", default=False), + "require_uppercase": dict(type="bool", default=False), + "require_lowercase": dict(type="bool", default=False), + "allow_pw_change": dict(type="bool", aliases=["allow_password_change"], default=False), + "pw_max_age": dict(type="int", aliases=["password_max_age"], default=0), + "pw_reuse_prevent": dict(type="int", aliases=["password_reuse_prevent", "prevent_reuse"], default=0), + "pw_expire": dict(type="bool", aliases=["password_expire", "expire"], default=False), }, supports_check_mode=True, ) @@ -195,16 +205,16 @@ def main(): resource = IAMConnection(module) policy = resource.connection.AccountPasswordPolicy() - state = module.params.get('state') + state = module.params.get("state") - if state == 'present': + if state == "present": (changed, new_policy, update_result) = resource.update_password_policy(module, policy) - module.exit_json(changed=changed, task_status={'IAM': update_result}, policy=new_policy) + module.exit_json(changed=changed, task_status={"IAM": update_result}, policy=new_policy) - if state == 'absent': + if state == "absent": delete_result = resource.delete_password_policy(policy) - module.exit_json(changed=True, task_status={'IAM': delete_result}) + module.exit_json(changed=True, task_status={"IAM": delete_result}) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_role.py b/plugins/modules/iam_role.py index 07463cd9736..3cafe85d2cb 100644 --- a/plugins/modules/iam_role.py +++ b/plugins/modules/iam_role.py @@ -236,44 +236,44 @@ @AWSRetry.jittered_backoff() def _list_policies(client): - paginator = client.get_paginator('list_policies') - return paginator.paginate().build_full_result()['Policies'] + paginator = client.get_paginator("list_policies") + return paginator.paginate().build_full_result()["Policies"] def wait_iam_exists(module, client): if module.check_mode: return - if not module.params.get('wait'): + if not module.params.get("wait"): return - role_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') + role_name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") delay = min(wait_timeout, 5) max_attempts = wait_timeout // delay try: - waiter = client.get_waiter('role_exists') + waiter = client.get_waiter("role_exists") waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, RoleName=role_name, ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on IAM role creation') + module.fail_json_aws(e, msg="Timeout while waiting on IAM role creation") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on IAM role creation') + module.fail_json_aws(e, msg="Failed while waiting on IAM role creation") def convert_friendly_names_to_arns(module, client, policy_names): - if not any(not policy.startswith('arn:') for policy in policy_names): + if not any(not policy.startswith("arn:") for policy in policy_names): return policy_names allpolicies = {} policies = _list_policies(client) for policy in policies: - allpolicies[policy['PolicyName']] = policy['Arn'] - allpolicies[policy['Arn']] = policy['Arn'] + allpolicies[policy["PolicyName"]] = policy["Arn"] + allpolicies[policy["Arn"]] = policy["Arn"] try: return [allpolicies[policy] for policy in policy_names] except KeyError as e: @@ -303,9 +303,12 @@ def remove_policies(module, client, policies_to_remove, role_name): try: client.detach_role_policy(RoleName=role_name, PolicyArn=policy, aws_retry=True) changed = True - except is_boto3_error_code('NoSuchEntityException'): + except is_boto3_error_code("NoSuchEntityException"): pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, role_name)) return changed @@ -315,25 +318,28 @@ def remove_inline_policies(module, client, role_name): for policy in current_inline_policies: try: client.delete_role_policy(RoleName=role_name, PolicyName=policy, aws_retry=True) - except is_boto3_error_code('NoSuchEntityException'): + except is_boto3_error_code("NoSuchEntityException"): pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete policy {0} embedded in {1}".format(policy, role_name)) def generate_create_params(module): params = dict() - params['Path'] = module.params.get('path') - params['RoleName'] = module.params.get('name') - params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document') - if module.params.get('description') is not None: - params['Description'] = module.params.get('description') - if module.params.get('max_session_duration') is not None: - params['MaxSessionDuration'] = module.params.get('max_session_duration') - if module.params.get('boundary') is not None: - params['PermissionsBoundary'] = module.params.get('boundary') - if module.params.get('tags') is not None: - params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) + params["Path"] = module.params.get("path") + params["RoleName"] = module.params.get("name") + params["AssumeRolePolicyDocument"] = module.params.get("assume_role_policy_document") + if module.params.get("description") is not None: + params["Description"] = module.params.get("description") + if module.params.get("max_session_duration") is not None: + params["MaxSessionDuration"] = module.params.get("max_session_duration") + if module.params.get("boundary") is not None: + params["PermissionsBoundary"] = module.params.get("boundary") + if module.params.get("tags") is not None: + params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get("tags")) return params @@ -352,7 +358,7 @@ def create_basic_role(module, client): # 'Description' is documented as key of the role returned by create_role # but appears to be an AWS bug (the value is not returned using the AWS CLI either). # Get the role after creating it. - role = get_role_with_backoff(module, client, params['RoleName']) + role = get_role_with_backoff(module, client, params["RoleName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create role") @@ -368,10 +374,7 @@ def update_role_assumed_policy(module, client, role_name, target_assumed_policy, return True try: - client.update_assume_role_policy( - RoleName=role_name, - PolicyDocument=target_assumed_policy, - aws_retry=True) + client.update_assume_role_policy(RoleName=role_name, PolicyDocument=target_assumed_policy, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(role_name)) return True @@ -407,7 +410,9 @@ def update_role_max_session_duration(module, client, role_name, target_duration, return True -def update_role_permissions_boundary(module, client, role_name, target_permissions_boundary, current_permissions_boundary): +def update_role_permissions_boundary( + module, client, role_name, target_permissions_boundary, current_permissions_boundary +): # Check PermissionsBoundary if target_permissions_boundary is None or target_permissions_boundary == current_permissions_boundary: return False @@ -415,14 +420,16 @@ def update_role_permissions_boundary(module, client, role_name, target_permissio if module.check_mode: return True - if target_permissions_boundary == '': + if target_permissions_boundary == "": try: client.delete_role_permissions_boundary(RoleName=role_name, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(role_name)) else: try: - client.put_role_permissions_boundary(RoleName=role_name, PermissionsBoundary=target_permissions_boundary, aws_retry=True) + client.put_role_permissions_boundary( + RoleName=role_name, PermissionsBoundary=target_permissions_boundary, aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(role_name)) return True @@ -435,7 +442,7 @@ def update_managed_policies(module, client, role_name, managed_policies, purge_p # Get list of current attached managed policies current_attached_policies = get_attached_policy_list(module, client, role_name) - current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies] + current_attached_policies_arn_list = [policy["PolicyArn"] for policy in current_attached_policies] if len(managed_policies) == 1 and managed_policies[0] is None: managed_policies = [] @@ -460,18 +467,17 @@ def update_managed_policies(module, client, role_name, managed_policies, purge_p def create_or_update_role(module, client): - - role_name = module.params.get('name') - assumed_policy = module.params.get('assume_role_policy_document') - create_instance_profile = module.params.get('create_instance_profile') - description = module.params.get('description') - duration = module.params.get('max_session_duration') - path = module.params.get('path') - permissions_boundary = module.params.get('boundary') - purge_tags = module.params.get('purge_tags') - tags = ansible_dict_to_boto3_tag_list(module.params.get('tags')) if module.params.get('tags') else None - purge_policies = module.params.get('purge_policies') - managed_policies = module.params.get('managed_policies') + role_name = module.params.get("name") + assumed_policy = module.params.get("assume_role_policy_document") + create_instance_profile = module.params.get("create_instance_profile") + description = module.params.get("description") + duration = module.params.get("max_session_duration") + path = module.params.get("path") + permissions_boundary = module.params.get("boundary") + purge_tags = module.params.get("purge_tags") + tags = ansible_dict_to_boto3_tag_list(module.params.get("tags")) if module.params.get("tags") else None + purge_policies = module.params.get("purge_policies") + managed_policies = module.params.get("managed_policies") if managed_policies: # Attempt to list the policies early so we don't leave things behind if we can't find them. managed_policies = convert_friendly_names_to_arns(module, client, managed_policies) @@ -485,31 +491,33 @@ def create_or_update_role(module, client): if role is None: role = create_basic_role(module, client) - if not module.check_mode and module.params.get('wait'): + if not module.check_mode and module.params.get("wait"): wait_iam_exists(module, client) changed = True else: # Role exists - get current attributes - current_assumed_policy = role.get('AssumeRolePolicyDocument') - current_description = role.get('Description') - current_duration = role.get('MaxSessionDuration') - current_permissions_boundary = role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', '') + current_assumed_policy = role.get("AssumeRolePolicyDocument") + current_description = role.get("Description") + current_duration = role.get("MaxSessionDuration") + current_permissions_boundary = role.get("PermissionsBoundary", {}).get("PermissionsBoundaryArn", "") # Update attributes changed |= update_role_tags(module, client, role_name, tags, purge_tags) changed |= update_role_assumed_policy(module, client, role_name, assumed_policy, current_assumed_policy) changed |= update_role_description(module, client, role_name, description, current_description) changed |= update_role_max_session_duration(module, client, role_name, duration, current_duration) - changed |= update_role_permissions_boundary(module, client, role_name, permissions_boundary, current_permissions_boundary) + changed |= update_role_permissions_boundary( + module, client, role_name, permissions_boundary, current_permissions_boundary + ) - if not module.check_mode and module.params.get('wait'): + if not module.check_mode and module.params.get("wait"): wait_iam_exists(module, client) if create_instance_profile: changed |= create_instance_profiles(module, client, role_name, path) - if not module.check_mode and module.params.get('wait'): + if not module.check_mode and module.params.get("wait"): wait_iam_exists(module, client) changed |= update_managed_policies(module, client, role_name, managed_policies, purge_policies) @@ -517,24 +525,25 @@ def create_or_update_role(module, client): # Get the role again role = get_role(module, client, role_name) - role['AttachedPolicies'] = get_attached_policy_list(module, client, role_name) - role['tags'] = get_role_tags(module, client) + role["AttachedPolicies"] = get_attached_policy_list(module, client, role_name) + role["tags"] = get_role_tags(module, client) - camel_role = camel_dict_to_snake_dict(role, ignore_list=['tags']) + camel_role = camel_dict_to_snake_dict(role, ignore_list=["tags"]) camel_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument", {}) module.exit_json(changed=changed, iam_role=camel_role, **camel_role) def create_instance_profiles(module, client, role_name, path): - # Fetch existing Profiles try: - instance_profiles = client.list_instance_profiles_for_role(RoleName=role_name, aws_retry=True)['InstanceProfiles'] + instance_profiles = client.list_instance_profiles_for_role(RoleName=role_name, aws_retry=True)[ + "InstanceProfiles" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) # Profile already exists - if any(p['InstanceProfileName'] == role_name for p in instance_profiles): + if any(p["InstanceProfileName"] == role_name for p in instance_profiles): return False if module.check_mode: @@ -543,11 +552,14 @@ def create_instance_profiles(module, client, role_name, path): # Make sure an instance profile is created try: client.create_instance_profile(InstanceProfileName=role_name, Path=path, aws_retry=True) - except is_boto3_error_code('EntityAlreadyExists'): + except is_boto3_error_code("EntityAlreadyExists"): # If the profile already exists, no problem, move on. # Implies someone's changing things at the same time... return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(role_name)) # And attach the role to the profile @@ -563,31 +575,39 @@ def remove_instance_profiles(module, client, role_name): delete_profiles = module.params.get("delete_instance_profile") try: - instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, RoleName=role_name)['InstanceProfiles'] + instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, RoleName=role_name)[ + "InstanceProfiles" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) # Remove the role from the instance profile(s) for profile in instance_profiles: - profile_name = profile['InstanceProfileName'] + profile_name = profile["InstanceProfileName"] try: if not module.check_mode: - client.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, RoleName=role_name) + client.remove_role_from_instance_profile( + aws_retry=True, InstanceProfileName=profile_name, RoleName=role_name + ) if profile_name == role_name: if delete_profiles: try: client.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True) - except is_boto3_error_code('NoSuchEntityException'): + except is_boto3_error_code("NoSuchEntityException"): pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name)) + module.fail_json_aws( + e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name) + ) def destroy_role(module, client): - - role_name = module.params.get('name') + role_name = module.params.get("name") role = get_role(module, client, role_name) if role is None: @@ -603,9 +623,12 @@ def destroy_role(module, client): remove_inline_policies(module, client, role_name) try: client.delete_role(aws_retry=True, RoleName=role_name) - except is_boto3_error_code('NoSuchEntityException'): + except is_boto3_error_code("NoSuchEntityException"): module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete role") module.exit_json(changed=True) @@ -613,38 +636,43 @@ def destroy_role(module, client): def get_role_with_backoff(module, client, name): try: - return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(client.get_role)(RoleName=name)['Role'] + return AWSRetry.jittered_backoff(catch_extra_error_codes=["NoSuchEntity"])(client.get_role)(RoleName=name)[ + "Role" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) def get_role(module, client, name): try: - return client.get_role(RoleName=name, aws_retry=True)['Role'] - except is_boto3_error_code('NoSuchEntity'): + return client.get_role(RoleName=name, aws_retry=True)["Role"] + except is_boto3_error_code("NoSuchEntity"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) def get_attached_policy_list(module, client, name): try: - return client.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies'] + return client.list_attached_role_policies(RoleName=name, aws_retry=True)["AttachedPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) def get_inline_policy_list(module, client, name): try: - return client.list_role_policies(RoleName=name, aws_retry=True)['PolicyNames'] + return client.list_role_policies(RoleName=name, aws_retry=True)["PolicyNames"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) def get_role_tags(module, client): - role_name = module.params.get('name') + role_name = module.params.get("name") try: - return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) + return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)["Tags"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name)) @@ -655,7 +683,9 @@ def update_role_tags(module, client, role_name, new_tags, purge_tags): new_tags = boto3_tag_list_to_ansible_dict(new_tags) try: - existing_tags = boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) + existing_tags = boto3_tag_list_to_ansible_dict( + client.list_role_tags(RoleName=role_name, aws_retry=True)["Tags"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError): existing_tags = {} @@ -668,69 +698,76 @@ def update_role_tags(module, client, role_name, new_tags, purge_tags): if tags_to_add: client.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name) + module.fail_json_aws(e, msg="Unable to set tags for role %s" % role_name) changed = bool(tags_to_add) or bool(tags_to_remove) return changed def main(): - argument_spec = dict( - name=dict(type='str', required=True), - path=dict(type='str', default="/"), - assume_role_policy_document=dict(type='json'), - managed_policies=dict(type='list', aliases=['managed_policy'], elements='str'), - max_session_duration=dict(type='int'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - description=dict(type='str'), - boundary=dict(type='str', aliases=['boundary_policy_arn']), - create_instance_profile=dict(type='bool', default=True), - delete_instance_profile=dict(type='bool', default=False), - purge_policies=dict(default=True, type='bool', aliases=['purge_policy', 'purge_managed_policies']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=120, type='int'), + name=dict(type="str", required=True), + path=dict(type="str", default="/"), + assume_role_policy_document=dict(type="json"), + managed_policies=dict(type="list", aliases=["managed_policy"], elements="str"), + max_session_duration=dict(type="int"), + state=dict(type="str", choices=["present", "absent"], default="present"), + description=dict(type="str"), + boundary=dict(type="str", aliases=["boundary_policy_arn"]), + create_instance_profile=dict(type="bool", default=True), + delete_instance_profile=dict(type="bool", default=False), + purge_policies=dict(default=True, type="bool", aliases=["purge_policy", "purge_managed_policies"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=120, type="int"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[('state', 'present', ['assume_role_policy_document'])], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["assume_role_policy_document"])], + supports_check_mode=True, + ) - module.deprecate("All return values other than iam_role and changed have been deprecated and " - "will be removed in a release after 2023-12-01.", - date="2023-12-01", collection_name="community.aws") + module.deprecate( + "All return values other than iam_role and changed have been deprecated and " + "will be removed in a release after 2023-12-01.", + date="2023-12-01", + collection_name="community.aws", + ) - module.deprecate("In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document " - "will no longer be converted from CamelCase to snake_case. The " - "iam_role.assume_role_policy_document_raw return value already returns the " - "policy document in this future format.", - date="2023-12-01", collection_name="community.aws") + module.deprecate( + "In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document " + "will no longer be converted from CamelCase to snake_case. The " + "iam_role.assume_role_policy_document_raw return value already returns the " + "policy document in this future format.", + date="2023-12-01", + collection_name="community.aws", + ) - if module.params.get('boundary'): - if module.params.get('create_instance_profile'): + if module.params.get("boundary"): + if module.params.get("create_instance_profile"): module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.") - if not module.params.get('boundary').startswith('arn:aws:iam'): + if not module.params.get("boundary").startswith("arn:aws:iam"): module.fail_json(msg="Boundary policy must be an ARN") - if module.params.get('max_session_duration'): - max_session_duration = module.params.get('max_session_duration') + if module.params.get("max_session_duration"): + max_session_duration = module.params.get("max_session_duration") if max_session_duration < 3600 or max_session_duration > 43200: module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)") - if module.params.get('path'): - path = module.params.get('path') - if not path.endswith('/') or not path.startswith('/'): + if module.params.get("path"): + path = module.params.get("path") + if not path.endswith("/") or not path.startswith("/"): module.fail_json(msg="path must begin and end with /") - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_role(module, client) - elif state == 'absent': + elif state == "absent": destroy_role(module, client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_role_info.py b/plugins/modules/iam_role_info.py index e3bdb7695bf..a7576a131ec 100644 --- a/plugins/modules/iam_role_info.py +++ b/plugins/modules/iam_role_info.py @@ -168,70 +168,73 @@ @AWSRetry.jittered_backoff() def list_iam_roles_with_backoff(client, **kwargs): - paginator = client.get_paginator('list_roles') + paginator = client.get_paginator("list_roles") return paginator.paginate(**kwargs).build_full_result() @AWSRetry.jittered_backoff() def list_iam_role_policies_with_backoff(client, role_name): - paginator = client.get_paginator('list_role_policies') - return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames'] + paginator = client.get_paginator("list_role_policies") + return paginator.paginate(RoleName=role_name).build_full_result()["PolicyNames"] @AWSRetry.jittered_backoff() def list_iam_attached_role_policies_with_backoff(client, role_name): - paginator = client.get_paginator('list_attached_role_policies') - return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies'] + paginator = client.get_paginator("list_attached_role_policies") + return paginator.paginate(RoleName=role_name).build_full_result()["AttachedPolicies"] @AWSRetry.jittered_backoff() def list_iam_instance_profiles_for_role_with_backoff(client, role_name): - paginator = client.get_paginator('list_instance_profiles_for_role') - return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles'] + paginator = client.get_paginator("list_instance_profiles_for_role") + return paginator.paginate(RoleName=role_name).build_full_result()["InstanceProfiles"] def describe_iam_role(module, client, role): - name = role['RoleName'] + name = role["RoleName"] try: - role['InlinePolicies'] = list_iam_role_policies_with_backoff(client, name) + role["InlinePolicies"] = list_iam_role_policies_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name) try: - role['ManagedPolicies'] = list_iam_attached_role_policies_with_backoff(client, name) + role["ManagedPolicies"] = list_iam_attached_role_policies_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name) try: - role['InstanceProfiles'] = list_iam_instance_profiles_for_role_with_backoff(client, name) + role["InstanceProfiles"] = list_iam_instance_profiles_for_role_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name) try: - role['tags'] = boto3_tag_list_to_ansible_dict(role['Tags']) - del role['Tags'] + role["tags"] = boto3_tag_list_to_ansible_dict(role["Tags"]) + del role["Tags"] except KeyError: - role['tags'] = {} + role["tags"] = {} return role def describe_iam_roles(module, client): - name = module.params['name'] - path_prefix = module.params['path_prefix'] + name = module.params["name"] + path_prefix = module.params["path_prefix"] if name: try: - roles = [client.get_role(RoleName=name, aws_retry=True)['Role']] - except is_boto3_error_code('NoSuchEntity'): + roles = [client.get_role(RoleName=name, aws_retry=True)["Role"]] + except is_boto3_error_code("NoSuchEntity"): return [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name) else: params = dict() if path_prefix: - if not path_prefix.startswith('/'): - path_prefix = '/' + path_prefix - if not path_prefix.endswith('/'): - path_prefix = path_prefix + '/' - params['PathPrefix'] = path_prefix + if not path_prefix.startswith("/"): + path_prefix = "/" + path_prefix + if not path_prefix.endswith("/"): + path_prefix = path_prefix + "/" + params["PathPrefix"] = path_prefix try: - roles = list_iam_roles_with_backoff(client, **params)['Roles'] + roles = list_iam_roles_with_backoff(client, **params)["Roles"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list IAM roles") return [normalize_role(describe_iam_role(module, client, role)) for role in roles] @@ -245,7 +248,7 @@ def normalize_profile(profile): def normalize_role(role): - new_role = camel_dict_to_snake_dict(role, ignore_list=['tags']) + new_role = camel_dict_to_snake_dict(role, ignore_list=["tags"]) new_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument") if role.get("InstanceProfiles"): role["instance_profiles"] = [normalize_profile(profile) for profile in role.get("InstanceProfiles")] @@ -254,27 +257,32 @@ def normalize_role(role): def main(): """ - Module action handler + Module action handler """ argument_spec = dict( - name=dict(aliases=['role_name']), + name=dict(aliases=["role_name"]), path_prefix=dict(), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['name', 'path_prefix']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[["name", "path_prefix"]], + ) - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) - module.deprecate("In a release after 2023-12-01 the contents of assume_role_policy_document " - "will no longer be converted from CamelCase to snake_case. The " - ".assume_role_policy_document_raw return value already returns the " - "policy document in this future format.", - date="2023-12-01", collection_name="community.aws") + module.deprecate( + "In a release after 2023-12-01 the contents of assume_role_policy_document " + "will no longer be converted from CamelCase to snake_case. The " + ".assume_role_policy_document_raw return value already returns the " + "policy document in this future format.", + date="2023-12-01", + collection_name="community.aws", + ) module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_saml_federation.py b/plugins/modules/iam_saml_federation.py index e134588f7ef..238aa5d9a3f 100644 --- a/plugins/modules/iam_saml_federation.py +++ b/plugins/modules/iam_saml_federation.py @@ -106,7 +106,7 @@ def __init__(self, module): self.module = module try: - self.conn = module.client('iam') + self.conn = module.client("iam") except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Unknown AWS SDK error") @@ -133,10 +133,10 @@ def _delete_saml_provider(self, arn): def _get_provider_arn(self, name): providers = self._list_saml_providers() - for p in providers['SAMLProviderList']: - provider_name = p['Arn'].split('/', 1)[1] + for p in providers["SAMLProviderList"]: + provider_name = p["Arn"].split("/", 1)[1] if name == provider_name: - return p['Arn'] + return p["Arn"] return None @@ -144,7 +144,7 @@ def create_or_update_saml_provider(self, name, metadata): if not metadata: self.module.fail_json(msg="saml_metadata_document must be defined for present state") - res = {'changed': False} + res = {"changed": False} try: arn = self._get_provider_arn(name) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: @@ -156,38 +156,38 @@ def create_or_update_saml_provider(self, name, metadata): except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Could not retrieve the identity provider '{0}'".format(name)) - if metadata.strip() != resp['SAMLMetadataDocument'].strip(): + if metadata.strip() != resp["SAMLMetadataDocument"].strip(): # provider needs updating - res['changed'] = True + res["changed"] = True if not self.module.check_mode: try: resp = self._update_saml_provider(arn, metadata) - res['saml_provider'] = self._build_res(resp['SAMLProviderArn']) + res["saml_provider"] = self._build_res(resp["SAMLProviderArn"]) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Could not update the identity provider '{0}'".format(name)) else: - res['saml_provider'] = self._build_res(arn) + res["saml_provider"] = self._build_res(arn) else: # create - res['changed'] = True + res["changed"] = True if not self.module.check_mode: try: resp = self._create_saml_provider(metadata, name) - res['saml_provider'] = self._build_res(resp['SAMLProviderArn']) + res["saml_provider"] = self._build_res(resp["SAMLProviderArn"]) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Could not create the identity provider '{0}'".format(name)) self.module.exit_json(**res) def delete_saml_provider(self, name): - res = {'changed': False} + res = {"changed": False} try: arn = self._get_provider_arn(name) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name)) if arn: # delete - res['changed'] = True + res["changed"] = True if not self.module.check_mode: try: self._delete_saml_provider(arn) @@ -202,7 +202,7 @@ def _build_res(self, arn): "arn": arn, "metadata_document": saml_provider["SAMLMetadataDocument"], "create_date": saml_provider["CreateDate"].isoformat(), - "expire_date": saml_provider["ValidUntil"].isoformat() + "expire_date": saml_provider["ValidUntil"].isoformat(), } @@ -210,26 +210,26 @@ def main(): argument_spec = dict( name=dict(required=True), saml_metadata_document=dict(default=None, required=False), - state=dict(default='present', required=False, choices=['present', 'absent']), + state=dict(default="present", required=False, choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - required_if=[('state', 'present', ['saml_metadata_document'])] + required_if=[("state", "present", ["saml_metadata_document"])], ) - name = module.params['name'] - state = module.params.get('state') - saml_metadata_document = module.params.get('saml_metadata_document') + name = module.params["name"] + state = module.params.get("state") + saml_metadata_document = module.params.get("saml_metadata_document") sp_man = SAMLProviderManager(module) - if state == 'present': + if state == "present": sp_man.create_or_update_saml_provider(name, saml_metadata_document) - elif state == 'absent': + elif state == "absent": sp_man.delete_saml_provider(name) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_server_certificate.py b/plugins/modules/iam_server_certificate.py index 3ab35fb6864..dd8427dc15b 100644 --- a/plugins/modules/iam_server_certificate.py +++ b/plugins/modules/iam_server_certificate.py @@ -109,22 +109,22 @@ @AWSRetry.jittered_backoff() def _list_server_certficates(): - paginator = client.get_paginator('list_server_certificates') - return paginator.paginate().build_full_result()['ServerCertificateMetadataList'] + paginator = client.get_paginator("list_server_certificates") + return paginator.paginate().build_full_result()["ServerCertificateMetadataList"] def check_duplicate_cert(new_cert): - orig_cert_names = list(c['ServerCertificateName'] for c in _list_server_certficates()) + orig_cert_names = list(c["ServerCertificateName"] for c in _list_server_certficates()) for cert_name in orig_cert_names: cert = get_server_certificate(cert_name) if not cert: continue - cert_body = cert.get('certificate_body', None) + cert_body = cert.get("certificate_body", None) if not _compare_cert(new_cert, cert_body): continue module.fail_json( changed=False, - msg='This certificate already exists under the name {0} and dup_ok=False'.format(cert_name), + msg="This certificate already exists under the name {0} and dup_ok=False".format(cert_name), duplicate_cert=cert, ) @@ -137,25 +137,25 @@ def _compare_cert(cert_a, cert_b): # Trim out the whitespace before comparing the certs. While this could mean # an invalid cert 'matches' a valid cert, that's better than some stray # whitespace breaking things - cert_a.replace('\r', '') - cert_a.replace('\n', '') - cert_a.replace(' ', '') - cert_b.replace('\r', '') - cert_b.replace('\n', '') - cert_b.replace(' ', '') + cert_a.replace("\r", "") + cert_a.replace("\n", "") + cert_a.replace(" ", "") + cert_b.replace("\r", "") + cert_b.replace("\n", "") + cert_b.replace(" ", "") return cert_a == cert_b def update_server_certificate(current_cert): changed = False - cert = module.params.get('cert') - cert_chain = module.params.get('cert_chain') + cert = module.params.get("cert") + cert_chain = module.params.get("cert_chain") - if not _compare_cert(cert, current_cert.get('certificate_body', None)): - module.fail_json(msg='Modifying the certificate body is not supported by AWS') - if not _compare_cert(cert_chain, current_cert.get('certificate_chain', None)): - module.fail_json(msg='Modifying the chaining certificate is not supported by AWS') + if not _compare_cert(cert, current_cert.get("certificate_body", None)): + module.fail_json(msg="Modifying the certificate body is not supported by AWS") + if not _compare_cert(cert_chain, current_cert.get("certificate_chain", None)): + module.fail_json(msg="Modifying the chaining certificate is not supported by AWS") # We can't compare keys. if module.check_mode: @@ -168,15 +168,15 @@ def update_server_certificate(current_cert): def create_server_certificate(): - cert = module.params.get('cert') - key = module.params.get('key') - cert_chain = module.params.get('cert_chain') + cert = module.params.get("cert") + key = module.params.get("key") + cert_chain = module.params.get("cert_chain") - if not module.params.get('dup_ok'): + if not module.params.get("dup_ok"): check_duplicate_cert(cert) - path = module.params.get('path') - name = module.params.get('name') + path = module.params.get("path") + name = module.params.get("name") params = dict( ServerCertificateName=name, @@ -185,28 +185,25 @@ def create_server_certificate(): ) if cert_chain: - params['CertificateChain'] = cert_chain + params["CertificateChain"] = cert_chain if path: - params['Path'] = path + params["Path"] = path if module.check_mode: return True try: - client.upload_server_certificate( - aws_retry=True, - **params - ) + client.upload_server_certificate(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to update server certificate {0}'.format(name)) + module.fail_json_aws(e, msg="Failed to update server certificate {0}".format(name)) return True def rename_server_certificate(current_cert): - name = module.params.get('name') - new_name = module.params.get('new_name') - new_path = module.params.get('new_path') + name = module.params.get("name") + new_name = module.params.get("new_name") + new_path = module.params.get("new_path") changes = dict() @@ -215,16 +212,16 @@ def rename_server_certificate(current_cert): current_cert = get_server_certificate(new_name) else: if new_name: - changes['NewServerCertificateName'] = new_name + changes["NewServerCertificateName"] = new_name - cert_metadata = current_cert.get('server_certificate_metadata', {}) + cert_metadata = current_cert.get("server_certificate_metadata", {}) if not current_cert: - module.fail_json(msg='Unable to find certificate {0}'.format(name)) + module.fail_json(msg="Unable to find certificate {0}".format(name)) - current_path = cert_metadata.get('path', None) + current_path = cert_metadata.get("path", None) if new_path and current_path != new_path: - changes['NewPath'] = new_path + changes["NewPath"] = new_path if not changes: return False @@ -233,14 +230,9 @@ def rename_server_certificate(current_cert): return True try: - client.update_server_certificate( - aws_retry=True, - ServerCertificateName=name, - **changes - ) + client.update_server_certificate(aws_retry=True, ServerCertificateName=name, **changes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to update server certificate {0}'.format(name), - changes=changes) + module.fail_json_aws(e, msg="Failed to update server certificate {0}".format(name), changes=changes) return True @@ -252,17 +244,20 @@ def delete_server_certificate(current_cert): if module.check_mode: return True - name = module.params.get('name') + name = module.params.get("name") try: result = client.delete_server_certificate( aws_retry=True, ServerCertificateName=name, ) - except is_boto3_error_code('NoSuchEntity'): + except is_boto3_error_code("NoSuchEntity"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to delete server certificate {0}'.format(name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to delete server certificate {0}".format(name)) return True @@ -275,11 +270,14 @@ def get_server_certificate(name): aws_retry=True, ServerCertificateName=name, ) - except is_boto3_error_code('NoSuchEntity'): + except is_boto3_error_code("NoSuchEntity"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to get server certificate {0}'.format(name)) - cert = dict(camel_dict_to_snake_dict(result.get('ServerCertificate'))) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get server certificate {0}".format(name)) + cert = dict(camel_dict_to_snake_dict(result.get("ServerCertificate"))) return cert @@ -289,75 +287,74 @@ def compatability_results(current_cert): if not current_cert: return compat_results - metadata = current_cert.get('server_certificate_metadata', {}) - - if current_cert.get('certificate_body', None): - compat_results['cert_body'] = current_cert.get('certificate_body') - if current_cert.get('certificate_chain', None): - compat_results['chain_cert_body'] = current_cert.get('certificate_chain') - if metadata.get('arn', None): - compat_results['arn'] = metadata.get('arn') - if metadata.get('expiration', None): - compat_results['expiration_date'] = metadata.get('expiration') - if metadata.get('path', None): - compat_results['cert_path'] = metadata.get('path') - if metadata.get('server_certificate_name', None): - compat_results['name'] = metadata.get('server_certificate_name') - if metadata.get('upload_date', None): - compat_results['upload_date'] = metadata.get('upload_date') + metadata = current_cert.get("server_certificate_metadata", {}) + + if current_cert.get("certificate_body", None): + compat_results["cert_body"] = current_cert.get("certificate_body") + if current_cert.get("certificate_chain", None): + compat_results["chain_cert_body"] = current_cert.get("certificate_chain") + if metadata.get("arn", None): + compat_results["arn"] = metadata.get("arn") + if metadata.get("expiration", None): + compat_results["expiration_date"] = metadata.get("expiration") + if metadata.get("path", None): + compat_results["cert_path"] = metadata.get("path") + if metadata.get("server_certificate_name", None): + compat_results["name"] = metadata.get("server_certificate_name") + if metadata.get("upload_date", None): + compat_results["upload_date"] = metadata.get("upload_date") return compat_results def main(): - global module global client argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(required=True), cert=dict(), key=dict(no_log=True), cert_chain=dict(), new_name=dict(), - path=dict(default='/'), + path=dict(default="/"), new_path=dict(), - dup_ok=dict(type='bool', default=True), + dup_ok=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['new_path', 'key'], - ['new_path', 'cert'], - ['new_path', 'cert_chain'], - ['new_name', 'key'], - ['new_name', 'cert'], - ['new_name', 'cert_chain'], + ["new_path", "key"], + ["new_path", "cert"], + ["new_path", "cert_chain"], + ["new_name", "key"], + ["new_name", "cert"], + ["new_name", "cert_chain"], ], supports_check_mode=True, ) - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) - state = module.params.get('state') - name = module.params.get('name') - path = module.params.get('path') - new_name = module.params.get('new_name') - new_path = module.params.get('new_path') - dup_ok = module.params.get('dup_ok') + state = module.params.get("state") + name = module.params.get("name") + path = module.params.get("path") + new_name = module.params.get("new_name") + new_path = module.params.get("new_path") + dup_ok = module.params.get("dup_ok") current_cert = get_server_certificate(name) results = dict() - if state == 'absent': + if state == "absent": changed = delete_server_certificate(current_cert) if changed: - results['deleted_cert'] = name + results["deleted_cert"] = name else: - msg = 'Certificate with the name {0} already absent'.format(name) - results['msg'] = msg + msg = "Certificate with the name {0} already absent".format(name) + results["msg"] = msg else: if new_name or new_path: changed = rename_server_certificate(current_cert) @@ -371,16 +368,13 @@ def main(): changed = create_server_certificate() updated_cert = get_server_certificate(name) - results['server_certificate'] = updated_cert + results["server_certificate"] = updated_cert compat_results = compatability_results(updated_cert) if compat_results: results.update(compat_results) - module.exit_json( - changed=changed, - **results - ) + module.exit_json(changed=changed, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iam_server_certificate_info.py b/plugins/modules/iam_server_certificate_info.py index eb38a5f8b48..5504cb746fd 100644 --- a/plugins/modules/iam_server_certificate_info.py +++ b/plugins/modules/iam_server_certificate_info.py @@ -110,22 +110,24 @@ def get_server_certs(iam, name=None): results = dict() try: if name: - server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']] + server_certs = [iam.get_server_certificate(ServerCertificateName=name)["ServerCertificate"]] else: - server_certs = iam.list_server_certificates()['ServerCertificateMetadataList'] + server_certs = iam.list_server_certificates()["ServerCertificateMetadataList"] for server_cert in server_certs: if not name: - server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate'] - cert_md = server_cert['ServerCertificateMetadata'] - results[cert_md['ServerCertificateName']] = { - 'certificate_body': server_cert['CertificateBody'], - 'server_certificate_id': cert_md['ServerCertificateId'], - 'server_certificate_name': cert_md['ServerCertificateName'], - 'arn': cert_md['Arn'], - 'path': cert_md['Path'], - 'expiration': cert_md['Expiration'].isoformat(), - 'upload_date': cert_md['UploadDate'].isoformat(), + server_cert = iam.get_server_certificate(ServerCertificateName=server_cert["ServerCertificateName"])[ + "ServerCertificate" + ] + cert_md = server_cert["ServerCertificateMetadata"] + results[cert_md["ServerCertificateName"]] = { + "certificate_body": server_cert["CertificateBody"], + "server_certificate_id": cert_md["ServerCertificateId"], + "server_certificate_name": cert_md["ServerCertificateName"], + "arn": cert_md["Arn"], + "path": cert_md["Path"], + "expiration": cert_md["Expiration"].isoformat(), + "upload_date": cert_md["UploadDate"].isoformat(), } except botocore.exceptions.ClientError: @@ -136,7 +138,7 @@ def get_server_certs(iam, name=None): def main(): argument_spec = dict( - name=dict(type='str'), + name=dict(type="str"), ) module = AnsibleAWSModule( @@ -145,14 +147,14 @@ def main(): ) try: - iam = module.client('iam') + iam = module.client("iam") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - cert_name = module.params.get('name') + cert_name = module.params.get("name") results = get_server_certs(iam, cert_name) module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/inspector_target.py b/plugins/modules/inspector_target.py index 8891fa34a67..f9ec6d53a84 100644 --- a/plugins/modules/inspector_target.py +++ b/plugins/modules/inspector_target.py @@ -116,11 +116,11 @@ def main(): argument_spec = dict( name=dict(required=True), - state=dict(choices=['absent', 'present'], default='present'), - tags=dict(type='dict'), + state=dict(choices=["absent", "present"], default="present"), + tags=dict(type="dict"), ) - required_if = [['state', 'present', ['tags']]] + required_if = [["state", "present", ["tags"]]] module = AnsibleAWSModule( argument_spec=argument_spec, @@ -128,29 +128,37 @@ def main(): required_if=required_if, ) - name = module.params.get('name') - state = module.params.get('state').lower() - tags = module.params.get('tags') + name = module.params.get("name") + state = module.params.get("state").lower() + tags = module.params.get("tags") if tags: - tags = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + tags = ansible_dict_to_boto3_tag_list(tags, "key", "value") - client = module.client('inspector') + client = module.client("inspector") try: existing_target_arn = client.list_assessment_targets( - filter={'assessmentTargetNamePattern': name}, - ).get('assessmentTargetArns')[0] + filter={"assessmentTargetNamePattern": name}, + ).get( + "assessmentTargetArns" + )[0] existing_target = camel_dict_to_snake_dict( client.describe_assessment_targets( assessmentTargetArns=[existing_target_arn], - ).get('assessmentTargets')[0] + ).get( + "assessmentTargets" + )[0] ) - existing_resource_group_arn = existing_target.get('resource_group_arn') - existing_resource_group_tags = client.describe_resource_groups( - resourceGroupArns=[existing_resource_group_arn], - ).get('resourceGroups')[0].get('tags') + existing_resource_group_arn = existing_target.get("resource_group_arn") + existing_resource_group_tags = ( + client.describe_resource_groups( + resourceGroupArns=[existing_resource_group_arn], + ) + .get("resourceGroups")[0] + .get("tags") + ) target_exists = True except ( @@ -161,23 +169,18 @@ def main(): except IndexError: target_exists = False - if state == 'present' and target_exists: + if state == "present" and target_exists: ansible_dict_tags = boto3_tag_list_to_ansible_dict(tags) - ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict( - existing_resource_group_tags - ) - tags_to_add, tags_to_remove = compare_aws_tags( - ansible_dict_tags, - ansible_dict_existing_tags - ) + ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict(existing_resource_group_tags) + tags_to_add, tags_to_remove = compare_aws_tags(ansible_dict_tags, ansible_dict_existing_tags) if not (tags_to_add or tags_to_remove): - existing_target.update({'tags': ansible_dict_existing_tags}) + existing_target.update({"tags": ansible_dict_existing_tags}) module.exit_json(changed=False, **existing_target) else: try: updated_resource_group_arn = client.create_resource_group( resourceGroupTags=tags, - ).get('resourceGroupArn') + ).get("resourceGroupArn") client.update_assessment_target( assessmentTargetArn=existing_target_arn, @@ -188,10 +191,12 @@ def main(): updated_target = camel_dict_to_snake_dict( client.describe_assessment_targets( assessmentTargetArns=[existing_target_arn], - ).get('assessmentTargets')[0] + ).get( + "assessmentTargets" + )[0] ) - updated_target.update({'tags': ansible_dict_tags}) + updated_target.update({"tags": ansible_dict_tags}) module.exit_json(changed=True, **updated_target) except ( botocore.exceptions.BotoCoreError, @@ -199,24 +204,26 @@ def main(): ) as e: module.fail_json_aws(e, msg="trying to update target") - elif state == 'present' and not target_exists: + elif state == "present" and not target_exists: try: new_resource_group_arn = client.create_resource_group( resourceGroupTags=tags, - ).get('resourceGroupArn') + ).get("resourceGroupArn") new_target_arn = client.create_assessment_target( assessmentTargetName=name, resourceGroupArn=new_resource_group_arn, - ).get('assessmentTargetArn') + ).get("assessmentTargetArn") new_target = camel_dict_to_snake_dict( client.describe_assessment_targets( assessmentTargetArns=[new_target_arn], - ).get('assessmentTargets')[0] + ).get( + "assessmentTargets" + )[0] ) - new_target.update({'tags': boto3_tag_list_to_ansible_dict(tags)}) + new_target.update({"tags": boto3_tag_list_to_ansible_dict(tags)}) module.exit_json(changed=True, **new_target) except ( botocore.exceptions.BotoCoreError, @@ -224,7 +231,7 @@ def main(): ) as e: module.fail_json_aws(e, msg="trying to create target") - elif state == 'absent' and target_exists: + elif state == "absent" and target_exists: try: client.delete_assessment_target( assessmentTargetArn=existing_target_arn, @@ -236,9 +243,9 @@ def main(): ) as e: module.fail_json_aws(e, msg="trying to delete target") - elif state == 'absent' and not target_exists: + elif state == "absent" and not target_exists: module.exit_json(changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/kinesis_stream.py b/plugins/modules/kinesis_stream.py index 2bcca6a4ad4..8147f60f3db 100644 --- a/plugins/modules/kinesis_stream.py +++ b/plugins/modules/kinesis_stream.py @@ -209,16 +209,14 @@ def get_tags(client, stream_name): Returns: Tuple (bool, str, dict) """ - err_msg = '' + err_msg = "" success = False params = { - 'StreamName': stream_name, + "StreamName": stream_name, } results = dict() try: - results = ( - client.list_tags_for_stream(**params)['Tags'] - ) + results = client.list_tags_for_stream(**params)["Tags"] success = True except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -239,28 +237,26 @@ def find_stream(client, stream_name): Returns: Tuple (bool, str, dict) """ - err_msg = '' + err_msg = "" success = False params = { - 'StreamName': stream_name, + "StreamName": stream_name, } results = dict() has_more_shards = True shards = list() try: while has_more_shards: - results = ( - client.describe_stream(**params)['StreamDescription'] - ) - shards.extend(results.pop('Shards')) - has_more_shards = results['HasMoreShards'] + results = client.describe_stream(**params)["StreamDescription"] + shards.extend(results.pop("Shards")) + has_more_shards = results["HasMoreShards"] if has_more_shards: - params['ExclusiveStartShardId'] = shards[-1]['ShardId'] - results['Shards'] = shards - num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']]) - results['OpenShardsCount'] = len(shards) - num_closed_shards - results['ClosedShardsCount'] = num_closed_shards - results['ShardsCount'] = len(shards) + params["ExclusiveStartShardId"] = shards[-1]["ShardId"] + results["Shards"] = shards + num_closed_shards = len([s for s in shards if "EndingSequenceNumber" in s["SequenceNumberRange"]]) + results["OpenShardsCount"] = len(shards) - num_closed_shards + results["ClosedShardsCount"] = num_closed_shards + results["ShardsCount"] = len(shards) success = True except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -268,8 +264,7 @@ def find_stream(client, stream_name): return success, err_msg, results -def wait_for_status(client, stream_name, status, wait_timeout=300, - check_mode=False): +def wait_for_status(client, stream_name, status, wait_timeout=300, check_mode=False): """Wait for the status to change for a Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client @@ -298,16 +293,14 @@ def wait_for_status(client, stream_name, status, wait_timeout=300, while wait_timeout > time.time(): try: - find_success, find_msg, stream = ( - find_stream(client, stream_name) - ) + find_success, find_msg, stream = find_stream(client, stream_name) if check_mode: status_achieved = True break - elif status != 'DELETING': + elif status != "DELETING": if find_success and stream: - if stream.get('StreamStatus') == status: + if stream.get("StreamStatus") == status: status_achieved = True break @@ -329,7 +322,7 @@ def wait_for_status(client, stream_name, status, wait_timeout=300, return status_achieved, err_msg, stream -def tags_action(client, stream_name, tags, action='create', check_mode=False): +def tags_action(client, stream_name, tags, action="create", check_mode=False): """Create or delete multiple tags from a Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -356,26 +349,26 @@ def tags_action(client, stream_name, tags, action='create', check_mode=False): """ success = False err_msg = "" - params = {'StreamName': stream_name} + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'create': - params['Tags'] = tags + if action == "create": + params["Tags"] = tags client.add_tags_to_stream(**params) success = True - elif action == 'delete': - params['TagKeys'] = tags + elif action == "delete": + params["TagKeys"] = tags client.remove_tags_from_stream(**params) success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) else: - if action == 'create': + if action == "create": success = True - elif action == 'delete': + elif action == "delete": success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -405,32 +398,25 @@ def update_tags(client, stream_name, tags, check_mode=False): """ success = False changed = False - err_msg = '' - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + err_msg = "" + tag_success, tag_msg, current_tags = get_tags(client, stream_name) tags_to_set, tags_to_delete = compare_aws_tags( - current_tags, tags, + current_tags, + tags, purge_tags=True, ) if tags_to_delete: - delete_success, delete_msg = ( - tags_action( - client, stream_name, tags_to_delete, action='delete', - check_mode=check_mode - ) + delete_success, delete_msg = tags_action( + client, stream_name, tags_to_delete, action="delete", check_mode=check_mode ) if not delete_success: return delete_success, changed, delete_msg - tag_msg = 'Tags removed' + tag_msg = "Tags removed" if tags_to_set: - create_success, create_msg = ( - tags_action( - client, stream_name, tags_to_set, action='create', - check_mode=check_mode - ) + create_success, create_msg = tags_action( + client, stream_name, tags_to_set, action="create", check_mode=check_mode ) if create_success: changed = True @@ -439,8 +425,7 @@ def update_tags(client, stream_name, tags, check_mode=False): return success, changed, err_msg -def stream_action(client, stream_name, shard_count=1, action='create', - timeout=300, check_mode=False): +def stream_action(client, stream_name, shard_count=1, action="create", timeout=300, check_mode=False): """Create or Delete an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -464,28 +449,26 @@ def stream_action(client, stream_name, shard_count=1, action='create', List (bool, str) """ success = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'create': - params['ShardCount'] = shard_count + if action == "create": + params["ShardCount"] = shard_count client.create_stream(**params) success = True - elif action == 'delete': + elif action == "delete": client.delete_stream(**params) success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) else: - if action == 'create': + if action == "create": success = True - elif action == 'delete': + elif action == "delete": success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -493,8 +476,9 @@ def stream_action(client, stream_name, shard_count=1, action='create', return success, err_msg -def stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='', key_id='', - timeout=300, check_mode=False): +def stream_encryption_action( + client, stream_name, action="start_encryption", encryption_type="", key_id="", timeout=300, check_mode=False +): """Create, Encrypt or Delete an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -520,31 +504,29 @@ def stream_encryption_action(client, stream_name, action='start_encryption', enc List (bool, str) """ success = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'start_encryption': - params['EncryptionType'] = encryption_type - params['KeyId'] = key_id + if action == "start_encryption": + params["EncryptionType"] = encryption_type + params["KeyId"] = key_id client.start_stream_encryption(**params) success = True - elif action == 'stop_encryption': - params['EncryptionType'] = encryption_type - params['KeyId'] = key_id + elif action == "stop_encryption": + params["EncryptionType"] = encryption_type + params["KeyId"] = key_id client.stop_stream_encryption(**params) success = True else: - err_msg = 'Invalid encryption action {0}'.format(action) + err_msg = "Invalid encryption action {0}".format(action) else: - if action == 'start_encryption': + if action == "start_encryption": success = True - elif action == 'stop_encryption': + elif action == "stop_encryption": success = True else: - err_msg = 'Invalid encryption action {0}'.format(action) + err_msg = "Invalid encryption action {0}".format(action) except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -552,8 +534,7 @@ def stream_encryption_action(client, stream_name, action='start_encryption', enc return success, err_msg -def retention_action(client, stream_name, retention_period=24, - action='increase', check_mode=False): +def retention_action(client, stream_name, retention_period=24, action="increase", check_mode=False): """Increase or Decrease the retention of messages in the Kinesis stream. Args: client (botocore.client.EC2): Boto3 client. @@ -578,35 +559,29 @@ def retention_action(client, stream_name, retention_period=24, Tuple (bool, str) """ success = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'increase': - params['RetentionPeriodHours'] = retention_period + if action == "increase": + params["RetentionPeriodHours"] = retention_period client.increase_stream_retention_period(**params) success = True - err_msg = ( - 'Retention Period increased successfully to {0}'.format(retention_period) - ) - elif action == 'decrease': - params['RetentionPeriodHours'] = retention_period + err_msg = "Retention Period increased successfully to {0}".format(retention_period) + elif action == "decrease": + params["RetentionPeriodHours"] = retention_period client.decrease_stream_retention_period(**params) success = True - err_msg = ( - 'Retention Period decreased successfully to {0}'.format(retention_period) - ) + err_msg = "Retention Period decreased successfully to {0}".format(retention_period) else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) else: - if action == 'increase': + if action == "increase": success = True - elif action == 'decrease': + elif action == "decrease": success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -636,13 +611,10 @@ def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False Tuple (bool, str) """ success = True - err_msg = '' - params = { - 'StreamName': stream_name, - 'ScalingType': 'UNIFORM_SCALING' - } + err_msg = "" + params = {"StreamName": stream_name, "ScalingType": "UNIFORM_SCALING"} if not check_mode: - params['TargetShardCount'] = number_of_shards + params["TargetShardCount"] = number_of_shards try: client.update_shard_count(**params) except botocore.exceptions.ClientError as e: @@ -651,8 +623,17 @@ def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False return success, err_msg -def update(client, current_stream, stream_name, number_of_shards=1, retention_period=None, - tags=None, wait=False, wait_timeout=300, check_mode=False): +def update( + client, + current_stream, + stream_name, + number_of_shards=1, + retention_period=None, + tags=None, + wait=False, + wait_timeout=300, + check_mode=False, +): """Update an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -692,43 +673,30 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe """ success = True changed = False - err_msg = '' + err_msg = "" if retention_period: if wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not wait_success: return wait_success, False, wait_msg - if current_stream.get('StreamStatus') == 'ACTIVE': + if current_stream.get("StreamStatus") == "ACTIVE": retention_changed = False - if retention_period > current_stream['RetentionPeriodHours']: - retention_changed, retention_msg = ( - retention_action( - client, stream_name, retention_period, action='increase', - check_mode=check_mode - ) + if retention_period > current_stream["RetentionPeriodHours"]: + retention_changed, retention_msg = retention_action( + client, stream_name, retention_period, action="increase", check_mode=check_mode ) - elif retention_period < current_stream['RetentionPeriodHours']: - retention_changed, retention_msg = ( - retention_action( - client, stream_name, retention_period, action='decrease', - check_mode=check_mode - ) + elif retention_period < current_stream["RetentionPeriodHours"]: + retention_changed, retention_msg = retention_action( + client, stream_name, retention_period, action="decrease", check_mode=check_mode ) - elif retention_period == current_stream['RetentionPeriodHours']: - retention_msg = ( - 'Retention {0} is the same as {1}' - .format( - retention_period, - current_stream['RetentionPeriodHours'] - ) + elif retention_period == current_stream["RetentionPeriodHours"]: + retention_msg = "Retention {0} is the same as {1}".format( + retention_period, current_stream["RetentionPeriodHours"] ) success = True @@ -738,36 +706,27 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe err_msg = retention_msg if changed and wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not wait_success: return wait_success, False, wait_msg elif changed and not wait: - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - if current_stream['StreamStatus'] != 'ACTIVE': - err_msg = ( - 'Retention Period for {0} is in the process of updating' - .format(stream_name) - ) + if current_stream["StreamStatus"] != "ACTIVE": + err_msg = "Retention Period for {0} is in the process of updating".format(stream_name) return success, changed, err_msg else: err_msg = ( - 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}' - .format(current_stream.get('StreamStatus', 'UNKNOWN')) + "StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}".format( + current_stream.get("StreamStatus", "UNKNOWN") + ) ) return success, changed, err_msg - if current_stream['OpenShardsCount'] != number_of_shards: - success, err_msg = ( - update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode) - ) + if current_stream["OpenShardsCount"] != number_of_shards: + success, err_msg = update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode) if not success: return success, changed, err_msg @@ -775,47 +734,42 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe changed = True if wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not wait_success: return wait_success, changed, wait_msg else: - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) - if stream_found and current_stream['StreamStatus'] != 'ACTIVE': - err_msg = ( - 'Number of shards for {0} is in the process of updating' - .format(stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) + if stream_found and current_stream["StreamStatus"] != "ACTIVE": + err_msg = "Number of shards for {0} is in the process of updating".format(stream_name) return success, changed, err_msg if tags: - tag_success, tag_changed, err_msg = ( - update_tags(client, stream_name, tags, check_mode=check_mode) - ) + tag_success, tag_changed, err_msg = update_tags(client, stream_name, tags, check_mode=check_mode) changed |= tag_changed if wait: - success, err_msg, status_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + success, err_msg, status_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if success and changed: - err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name) + err_msg = "Kinesis Stream {0} updated successfully.".format(stream_name) elif success and not changed: - err_msg = 'Kinesis Stream {0} did not change.'.format(stream_name) + err_msg = "Kinesis Stream {0} did not change.".format(stream_name) return success, changed, err_msg -def create_stream(client, stream_name, number_of_shards=1, retention_period=None, - tags=None, wait=False, wait_timeout=300, check_mode=False): +def create_stream( + client, + stream_name, + number_of_shards=1, + retention_period=None, + tags=None, + wait=False, + wait_timeout=300, + check_mode=False, +): """Create an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -847,99 +801,74 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None """ success = False changed = False - err_msg = '' + err_msg = "" results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) - if stream_found and current_stream.get('StreamStatus') == 'DELETING' and wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + if stream_found and current_stream.get("StreamStatus") == "DELETING" and wait: + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) - if stream_found and current_stream.get('StreamStatus') != 'DELETING': + if stream_found and current_stream.get("StreamStatus") != "DELETING": success, changed, err_msg = update( - client, current_stream, stream_name, number_of_shards, - retention_period, tags, wait, wait_timeout, check_mode=check_mode + client, + current_stream, + stream_name, + number_of_shards, + retention_period, + tags, + wait, + wait_timeout, + check_mode=check_mode, ) else: - create_success, create_msg = ( - stream_action( - client, stream_name, number_of_shards, action='create', - check_mode=check_mode - ) + create_success, create_msg = stream_action( + client, stream_name, number_of_shards, action="create", check_mode=check_mode ) if not create_success: changed = True - err_msg = 'Failed to create Kinesis stream: {0}'.format(create_msg) + err_msg = "Failed to create Kinesis stream: {0}".format(create_msg) return False, True, err_msg, {} else: changed = True if wait: - wait_success, wait_msg, results = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) - ) - err_msg = ( - 'Kinesis Stream {0} is in the process of being created' - .format(stream_name) + wait_success, wait_msg, results = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) + err_msg = "Kinesis Stream {0} is in the process of being created".format(stream_name) if not wait_success: return wait_success, True, wait_msg, results else: - err_msg = ( - 'Kinesis Stream {0} created successfully' - .format(stream_name) - ) + err_msg = "Kinesis Stream {0} created successfully".format(stream_name) if tags: - changed, err_msg = ( - tags_action( - client, stream_name, tags, action='create', - check_mode=check_mode - ) - ) + changed, err_msg = tags_action(client, stream_name, tags, action="create", check_mode=check_mode) if changed: success = True if not success: return success, changed, err_msg, results - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) - if retention_period and current_stream.get('StreamStatus') == 'ACTIVE': - changed, err_msg = ( - retention_action( - client, stream_name, retention_period, action='increase', - check_mode=check_mode - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) + if retention_period and current_stream.get("StreamStatus") == "ACTIVE": + changed, err_msg = retention_action( + client, stream_name, retention_period, action="increase", check_mode=check_mode ) if changed: success = True if not success: return success, changed, err_msg, results else: - err_msg = ( - 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}' - .format(current_stream.get('StreamStatus', 'UNKNOWN')) + err_msg = "StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}".format( + current_stream.get("StreamStatus", "UNKNOWN") ) success = create_success changed = True if success: - stream_found, stream_msg, results = ( - find_stream(client, stream_name) - ) - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + stream_found, stream_msg, results = find_stream(client, stream_name) + tag_success, tag_msg, current_tags = get_tags(client, stream_name) if check_mode: current_tags = tags @@ -947,13 +876,12 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None current_tags = dict() results = camel_dict_to_snake_dict(results) - results['tags'] = current_tags + results["tags"] = current_tags return success, changed, err_msg, results -def delete_stream(client, stream_name, wait=False, wait_timeout=300, - check_mode=False): +def delete_stream(client, stream_name, wait=False, wait_timeout=300, check_mode=False): """Delete an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -977,44 +905,33 @@ def delete_stream(client, stream_name, wait=False, wait_timeout=300, """ success = False changed = False - err_msg = '' + err_msg = "" results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - success, err_msg = ( - stream_action( - client, stream_name, action='delete', check_mode=check_mode - ) - ) + success, err_msg = stream_action(client, stream_name, action="delete", check_mode=check_mode) if success: changed = True if wait: - success, err_msg, results = ( - wait_for_status( - client, stream_name, 'DELETING', wait_timeout, - check_mode=check_mode - ) + success, err_msg, results = wait_for_status( + client, stream_name, "DELETING", wait_timeout, check_mode=check_mode ) - err_msg = 'Stream {0} deleted successfully'.format(stream_name) + err_msg = "Stream {0} deleted successfully".format(stream_name) if not success: return success, True, err_msg, results else: - err_msg = ( - 'Stream {0} is in the process of being deleted' - .format(stream_name) - ) + err_msg = "Stream {0} is in the process of being deleted".format(stream_name) else: success = True changed = False - err_msg = 'Stream {0} does not exist'.format(stream_name) + err_msg = "Stream {0} does not exist".format(stream_name) return success, changed, err_msg, results -def start_stream_encryption(client, stream_name, encryption_type='', key_id='', - wait=False, wait_timeout=300, check_mode=False): +def start_stream_encryption( + client, stream_name, encryption_type="", key_id="", wait=False, wait_timeout=300, check_mode=False +): """Start encryption on an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -1042,65 +959,56 @@ def start_stream_encryption(client, stream_name, encryption_type='', key_id='', """ success = False changed = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - if (current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id): + if current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id: changed = False success = True - err_msg = 'Kinesis Stream {0} encryption already configured.'.format(stream_name) + err_msg = "Kinesis Stream {0} encryption already configured.".format(stream_name) else: - success, err_msg = ( - stream_encryption_action( - client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode - ) + success, err_msg = stream_encryption_action( + client, + stream_name, + action="start_encryption", + encryption_type=encryption_type, + key_id=key_id, + check_mode=check_mode, ) if success: changed = True if wait: - success, err_msg, results = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + success, err_msg, results = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) - err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name) + err_msg = "Kinesis Stream {0} encryption started successfully.".format(stream_name) if not success: return success, True, err_msg, results else: - err_msg = ( - 'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name) - ) + err_msg = "Kinesis Stream {0} is in the process of starting encryption.".format(stream_name) else: success = True changed = False - err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name) + err_msg = "Kinesis Stream {0} does not exist".format(stream_name) if success: - stream_found, stream_msg, results = ( - find_stream(client, stream_name) - ) - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + stream_found, stream_msg, results = find_stream(client, stream_name) + tag_success, tag_msg, current_tags = get_tags(client, stream_name) if not current_tags: current_tags = dict() results = camel_dict_to_snake_dict(results) - results['tags'] = current_tags + results["tags"] = current_tags return success, changed, err_msg, results -def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', - wait=True, wait_timeout=300, check_mode=False): +def stop_stream_encryption( + client, stream_name, encryption_type="", key_id="", wait=True, wait_timeout=300, check_mode=False +): """Stop encryption on an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -1126,57 +1034,47 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', """ success = False changed = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - if current_stream.get('EncryptionType') == 'KMS': - success, err_msg = ( - stream_encryption_action( - client, stream_name, action='stop_encryption', key_id=key_id, encryption_type=encryption_type, check_mode=check_mode - ) + if current_stream.get("EncryptionType") == "KMS": + success, err_msg = stream_encryption_action( + client, + stream_name, + action="stop_encryption", + key_id=key_id, + encryption_type=encryption_type, + check_mode=check_mode, ) changed = success if wait: - success, err_msg, results = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + success, err_msg, results = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not success: return success, True, err_msg, results - err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name) + err_msg = "Kinesis Stream {0} encryption stopped successfully.".format(stream_name) else: - err_msg = ( - 'Stream {0} is in the process of stopping encryption.'.format(stream_name) - ) - elif current_stream.get('EncryptionType') == 'NONE': + err_msg = "Stream {0} is in the process of stopping encryption.".format(stream_name) + elif current_stream.get("EncryptionType") == "NONE": success = True - err_msg = 'Kinesis Stream {0} encryption already stopped.'.format(stream_name) + err_msg = "Kinesis Stream {0} encryption already stopped.".format(stream_name) else: success = True changed = False - err_msg = 'Stream {0} does not exist.'.format(stream_name) + err_msg = "Stream {0} does not exist.".format(stream_name) if success: - stream_found, stream_msg, results = ( - find_stream(client, stream_name) - ) - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + stream_found, stream_msg, results = find_stream(client, stream_name) + tag_success, tag_msg, current_tags = get_tags(client, stream_name) if not current_tags: current_tags = dict() results = camel_dict_to_snake_dict(results) - results['tags'] = current_tags + results["tags"] = current_tags return success, changed, err_msg, results @@ -1184,78 +1082,65 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', def main(): argument_spec = dict( name=dict(required=True), - shards=dict(default=None, required=False, type='int'), - retention_period=dict(default=None, required=False, type='int'), - tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), - wait=dict(default=True, required=False, type='bool'), - wait_timeout=dict(default=300, required=False, type='int'), - state=dict(default='present', choices=['present', 'absent']), - encryption_type=dict(required=False, choices=['NONE', 'KMS']), - key_id=dict(required=False, type='str'), - encryption_state=dict(required=False, choices=['enabled', 'disabled']), + shards=dict(default=None, required=False, type="int"), + retention_period=dict(default=None, required=False, type="int"), + tags=dict(default=None, required=False, type="dict", aliases=["resource_tags"]), + wait=dict(default=True, required=False, type="bool"), + wait_timeout=dict(default=300, required=False, type="int"), + state=dict(default="present", choices=["present", "absent"]), + encryption_type=dict(required=False, choices=["NONE", "KMS"]), + key_id=dict(required=False, type="str"), + encryption_state=dict(required=False, choices=["enabled", "disabled"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, ) - retention_period = module.params.get('retention_period') - stream_name = module.params.get('name') - shards = module.params.get('shards') - state = module.params.get('state') - tags = module.params.get('tags') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - encryption_type = module.params.get('encryption_type') - key_id = module.params.get('key_id') - encryption_state = module.params.get('encryption_state') + retention_period = module.params.get("retention_period") + stream_name = module.params.get("name") + shards = module.params.get("shards") + state = module.params.get("state") + tags = module.params.get("tags") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + encryption_type = module.params.get("encryption_type") + key_id = module.params.get("key_id") + encryption_state = module.params.get("encryption_state") - if state == 'present' and not shards: - module.fail_json(msg='Shards is required when state == present.') + if state == "present" and not shards: + module.fail_json(msg="Shards is required when state == present.") if retention_period: if retention_period < 24: - module.fail_json(msg='Retention period can not be less than 24 hours.') + module.fail_json(msg="Retention period can not be less than 24 hours.") check_mode = module.check_mode try: - client = module.client('kinesis') + client = module.client("kinesis") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': - success, changed, err_msg, results = ( - create_stream( - client, stream_name, shards, retention_period, tags, - wait, wait_timeout, check_mode - ) + if state == "present": + success, changed, err_msg, results = create_stream( + client, stream_name, shards, retention_period, tags, wait, wait_timeout, check_mode ) - if encryption_state == 'enabled': - success, changed, err_msg, results = ( - start_stream_encryption( - client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode - ) + if encryption_state == "enabled": + success, changed, err_msg, results = start_stream_encryption( + client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode ) - elif encryption_state == 'disabled': - success, changed, err_msg, results = ( - stop_stream_encryption( - client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode - ) + elif encryption_state == "disabled": + success, changed, err_msg, results = stop_stream_encryption( + client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode ) - elif state == 'absent': - success, changed, err_msg, results = ( - delete_stream(client, stream_name, wait, wait_timeout, check_mode) - ) + elif state == "absent": + success, changed, err_msg, results = delete_stream(client, stream_name, wait, wait_timeout, check_mode) if success: - module.exit_json( - success=success, changed=changed, msg=err_msg, **results - ) + module.exit_json(success=success, changed=changed, msg=err_msg, **results) else: - module.fail_json( - success=success, changed=changed, msg=err_msg, result=results - ) + module.fail_json(success=success, changed=changed, msg=err_msg, result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lightsail.py b/plugins/modules/lightsail.py index 0739a042316..6fb83b26b1f 100644 --- a/plugins/modules/lightsail.py +++ b/plugins/modules/lightsail.py @@ -201,16 +201,15 @@ def find_instance_info(module, client, instance_name, fail_if_not_found=False): - try: res = client.get_instance(instanceName=instance_name) - except is_boto3_error_code('NotFoundException') as e: + except is_boto3_error_code("NotFoundException") as e: if fail_if_not_found: module.fail_json_aws(e) return None except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e) - return res['instance'] + return res["instance"] def wait_for_instance_state(module, client, instance_name, states): @@ -218,19 +217,21 @@ def wait_for_instance_state(module, client, instance_name, states): `states` is a list of instance states that we are waiting for. """ - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") wait_max = time.time() + wait_timeout while wait_max > time.time(): try: instance = find_instance_info(module, client, instance_name) - if instance['state']['name'] in states: + if instance["state"]["name"] in states: break time.sleep(5) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) else: - module.fail_json(msg='Timed out waiting for instance "{0}" to get to one of the following states -' - ' {1}'.format(instance_name, states)) + module.fail_json( + msg='Timed out waiting for instance "{0}" to get to one of the following states -' + " {1}".format(instance_name, states) + ) def update_public_ports(module, client, instance_name): @@ -244,7 +245,6 @@ def update_public_ports(module, client, instance_name): def create_or_update_instance(module, client, instance_name): - inst = find_instance_info(module, client, instance_name) if not inst: @@ -256,18 +256,18 @@ def create_or_update_instance(module, client, instance_name): "userData": module.params.get("user_data"), } - key_pair_name = module.params.get('key_pair_name') + key_pair_name = module.params.get("key_pair_name") if key_pair_name: - create_params['keyPairName'] = key_pair_name + create_params["keyPairName"] = key_pair_name try: client.create_instances(**create_params) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - wait = module.params.get('wait') + wait = module.params.get("wait") if wait: - desired_states = ['running'] + desired_states = ["running"] wait_for_instance_state(module, client, instance_name, desired_states) if module.params.get("public_ports") is not None: @@ -281,7 +281,6 @@ def create_or_update_instance(module, client, instance_name): def delete_instance(module, client, instance_name): - changed = False inst = find_instance_info(module, client, instance_name) @@ -289,7 +288,7 @@ def delete_instance(module, client, instance_name): module.exit_json(changed=changed, instance={}) # Wait for instance to exit transition state before deleting - desired_states = ['running', 'stopped'] + desired_states = ["running", "stopped"] wait_for_instance_state(module, client, instance_name, desired_states) try: @@ -330,13 +329,13 @@ def start_or_stop_instance(module, client, instance_name, state): inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) # Wait for instance to exit transition state before state change - desired_states = ['running', 'stopped'] + desired_states = ["running", "stopped"] wait_for_instance_state(module, client, instance_name, desired_states) # Try state change - if inst and inst['state']['name'] != state: + if inst and inst["state"]["name"] != state: try: - if state == 'running': + if state == "running": client.start_instance(instanceName=instance_name) else: client.stop_instance(instanceName=instance_name) @@ -346,7 +345,7 @@ def start_or_stop_instance(module, client, instance_name, state): # Grab current instance info inst = find_instance_info(module, client, instance_name) - wait = module.params.get('wait') + wait = module.params.get("wait") if wait: desired_states = [state] wait_for_instance_state(module, client, instance_name, desired_states) @@ -356,7 +355,6 @@ def start_or_stop_instance(module, client, instance_name, state): def main(): - argument_spec = dict( name=dict(type="str", required=True), state=dict( @@ -383,23 +381,24 @@ def main(): ), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ('zone', 'blueprint_id', 'bundle_id')]]) + module = AnsibleAWSModule( + argument_spec=argument_spec, required_if=[["state", "present", ("zone", "blueprint_id", "bundle_id")]] + ) - client = module.client('lightsail') + client = module.client("lightsail") - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_instance(module, client, name) - elif state == 'absent': + elif state == "absent": delete_instance(module, client, name) - elif state in ('running', 'stopped'): + elif state in ("running", "stopped"): start_or_stop_instance(module, client, name, state) - elif state in ('restarted', 'rebooted'): + elif state in ("restarted", "rebooted"): restart_instance(module, client, name) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lightsail_static_ip.py b/plugins/modules/lightsail_static_ip.py index 961f451a4ee..40d10a86bb1 100644 --- a/plugins/modules/lightsail_static_ip.py +++ b/plugins/modules/lightsail_static_ip.py @@ -76,25 +76,23 @@ def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False): - try: res = client.get_static_ip(staticIpName=static_ip_name) - except is_boto3_error_code('NotFoundException') as e: + except is_boto3_error_code("NotFoundException") as e: if fail_if_not_found: module.fail_json_aws(e) return None except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e) - return res['staticIp'] + return res["staticIp"] def create_static_ip(module, client, static_ip_name): - inst = find_static_ip_info(module, client, static_ip_name) if inst: module.exit_json(changed=False, static_ip=camel_dict_to_snake_dict(inst)) else: - create_params = {'staticIpName': static_ip_name} + create_params = {"staticIpName": static_ip_name} try: client.allocate_static_ip(**create_params) @@ -107,7 +105,6 @@ def create_static_ip(module, client, static_ip_name): def delete_static_ip(module, client, static_ip_name): - inst = find_static_ip_info(module, client, static_ip_name) if inst is None: module.exit_json(changed=False, static_ip={}) @@ -123,24 +120,23 @@ def delete_static_ip(module, client, static_ip_name): def main(): - argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec) - client = module.client('lightsail') + client = module.client("lightsail") - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") - if state == 'present': + if state == "present": create_static_ip(module, client, name) - elif state == 'absent': + elif state == "absent": delete_static_ip(module, client, name) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/mq_broker_info.py b/plugins/modules/mq_broker_info.py index 65a3524db41..da04596f589 100644 --- a/plugins/modules/mq_broker_info.py +++ b/plugins/modules/mq_broker_info.py @@ -84,7 +84,10 @@ def get_broker_info(conn, module, broker_id): def main(): argument_spec = dict(broker_id=dict(type="str"), broker_name=dict(type="str")) required_one_of = ( - ("broker_name", "broker_id",), + ( + "broker_name", + "broker_id", + ), ) module = AnsibleAWSModule( diff --git a/plugins/modules/msk_cluster.py b/plugins/modules/msk_cluster.py index 6bf143509ae..960ae115bcb 100644 --- a/plugins/modules/msk_cluster.py +++ b/plugins/modules/msk_cluster.py @@ -341,9 +341,7 @@ def wait_for_cluster_state(client, module, arn, state="ACTIVE"): return if time.time() - start > timeout: module.fail_json( - msg="Timeout waiting for cluster {0} (desired state is '{1}')".format( - current_state, state - ) + msg="Timeout waiting for cluster {0} (desired state is '{1}')".format(current_state, state) ) time.sleep(check_interval) @@ -364,7 +362,7 @@ def prepare_create_options(module): "BrokerNodeGroupInfo": { "ClientSubnets": module.params["subnets"], "InstanceType": module.params["instance_type"], - } + }, } if module.params["security_groups"] and len(module.params["security_groups"]) != 0: @@ -372,9 +370,7 @@ def prepare_create_options(module): if module.params["ebs_volume_size"]: c_params["BrokerNodeGroupInfo"]["StorageInfo"] = { - "EbsStorageInfo": { - "VolumeSize": module.params.get("ebs_volume_size") - } + "EbsStorageInfo": {"VolumeSize": module.params.get("ebs_volume_size")} } if module.params["encryption"]: @@ -385,7 +381,7 @@ def prepare_create_options(module): } c_params["EncryptionInfo"]["EncryptionInTransit"] = { "ClientBroker": module.params["encryption"]["in_transit"].get("client_broker", "TLS"), - "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True) + "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True), } if module.params["authentication"]: @@ -425,12 +421,8 @@ def prepare_open_monitoring_options(module): open_monitoring = module.params["open_monitoring"] or {} m_params["OpenMonitoring"] = { "Prometheus": { - "JmxExporter": { - "EnabledInBroker": open_monitoring.get("jmx_exporter", False) - }, - "NodeExporter": { - "EnabledInBroker": open_monitoring.get("node_exporter", False) - } + "JmxExporter": {"EnabledInBroker": open_monitoring.get("jmx_exporter", False)}, + "NodeExporter": {"EnabledInBroker": open_monitoring.get("node_exporter", False)}, } } return m_params @@ -442,36 +434,26 @@ def prepare_logging_options(module): if logging.get("cloudwatch"): l_params["CloudWatchLogs"] = { "Enabled": module.params["logging"]["cloudwatch"].get("enabled"), - "LogGroup": module.params["logging"]["cloudwatch"].get("log_group") + "LogGroup": module.params["logging"]["cloudwatch"].get("log_group"), } else: - l_params["CloudWatchLogs"] = { - "Enabled": False - } + l_params["CloudWatchLogs"] = {"Enabled": False} if logging.get("firehose"): l_params["Firehose"] = { "Enabled": module.params["logging"]["firehose"].get("enabled"), - "DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream") + "DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream"), } else: - l_params["Firehose"] = { - "Enabled": False - } + l_params["Firehose"] = {"Enabled": False} if logging.get("s3"): l_params["S3"] = { "Enabled": module.params["logging"]["s3"].get("enabled"), "Bucket": module.params["logging"]["s3"].get("bucket"), - "Prefix": module.params["logging"]["s3"].get("prefix") + "Prefix": module.params["logging"]["s3"].get("prefix"), } else: - l_params["S3"] = { - "Enabled": False - } - return { - "LoggingInfo": { - "BrokerLogs": l_params - } - } + l_params["S3"] = {"Enabled": False} + return {"LoggingInfo": {"BrokerLogs": l_params}} def create_or_update_cluster(client, module): @@ -485,7 +467,6 @@ def create_or_update_cluster(client, module): cluster = find_cluster_by_name(client, module, module.params["name"]) if not cluster: - changed = True if module.check_mode: @@ -505,7 +486,6 @@ def create_or_update_cluster(client, module): wait_for_cluster_state(client, module, arn=response["ClusterArn"], state="ACTIVE") else: - response["ClusterArn"] = cluster["ClusterArn"] response["changes"] = {} @@ -514,9 +494,7 @@ def create_or_update_cluster(client, module): "broker_count": { "current_value": cluster["NumberOfBrokerNodes"], "target_value": module.params.get("nodes"), - "update_params": { - "TargetNumberOfBrokerNodes": module.params.get("nodes") - } + "update_params": {"TargetNumberOfBrokerNodes": module.params.get("nodes")}, }, "broker_storage": { "current_value": cluster["BrokerNodeGroupInfo"]["StorageInfo"]["EbsStorageInfo"]["VolumeSize"], @@ -525,14 +503,12 @@ def create_or_update_cluster(client, module): "TargetBrokerEBSVolumeInfo": [ {"KafkaBrokerNodeId": "All", "VolumeSizeGB": module.params.get("ebs_volume_size")} ] - } + }, }, "broker_type": { "current_value": cluster["BrokerNodeGroupInfo"]["InstanceType"], "target_value": module.params.get("instance_type"), - "update_params": { - "TargetInstanceType": module.params.get("instance_type") - } + "update_params": {"TargetInstanceType": module.params.get("instance_type")}, }, "cluster_configuration": { "current_value": { @@ -546,44 +522,37 @@ def create_or_update_cluster(client, module): "update_params": { "ConfigurationInfo": { "Arn": module.params.get("configuration_arn"), - "Revision": module.params.get("configuration_revision") + "Revision": module.params.get("configuration_revision"), } - } + }, }, "cluster_kafka_version": { "current_value": cluster["CurrentBrokerSoftwareInfo"]["KafkaVersion"], "target_value": module.params.get("version"), - "update_params": { - "TargetKafkaVersion": module.params.get("version") - } + "update_params": {"TargetKafkaVersion": module.params.get("version")}, }, "enhanced_monitoring": { "current_value": cluster["EnhancedMonitoring"], "target_value": module.params.get("enhanced_monitoring"), "update_method": "update_monitoring", - "update_params": prepare_enhanced_monitoring_options(module) + "update_params": prepare_enhanced_monitoring_options(module), }, "open_monitoring": { - "current_value": { - "OpenMonitoring": cluster["OpenMonitoring"] - }, + "current_value": {"OpenMonitoring": cluster["OpenMonitoring"]}, "target_value": prepare_open_monitoring_options(module), "update_method": "update_monitoring", - "update_params": prepare_open_monitoring_options(module) + "update_params": prepare_open_monitoring_options(module), }, "logging": { - "current_value": { - "LoggingInfo": cluster["LoggingInfo"] - }, + "current_value": {"LoggingInfo": cluster["LoggingInfo"]}, "target_value": prepare_logging_options(module), "update_method": "update_monitoring", - "update_params": prepare_logging_options(module) - } + "update_params": prepare_logging_options(module), + }, } for method, options in msk_cluster_changes.items(): - - if 'botocore_version' in options: + if "botocore_version" in options: if not module.botocore_at_least(options["botocore_version"]): continue @@ -612,17 +581,13 @@ def create_or_update_cluster(client, module): ) try: response["changes"][method] = update_method( - ClusterArn=cluster["ClusterArn"], - CurrentVersion=version, - **options["update_params"] + ClusterArn=cluster["ClusterArn"], CurrentVersion=version, **options["update_params"] ) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws( - e, "Failed to update cluster via 'update_{0}'".format(method) - ) + module.fail_json_aws(e, "Failed to update cluster via 'update_{0}'".format(method)) if module.params["wait"]: wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE") @@ -633,13 +598,13 @@ def create_or_update_cluster(client, module): def update_cluster_tags(client, module, arn): - new_tags = module.params.get('tags') + new_tags = module.params.get("tags") if new_tags is None: return False - purge_tags = module.params.get('purge_tags') + purge_tags = module.params.get("purge_tags") try: - existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)['Tags'] + existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to retrieve tags for cluster '{0}'".format(arn)) @@ -659,7 +624,6 @@ def update_cluster_tags(client, module, arn): def delete_cluster(client, module): - cluster = find_cluster_by_name(client, module, module.params["name"]) if module.check_mode: @@ -688,7 +652,6 @@ def delete_cluster(client, module): def main(): - module_args = dict( name=dict(type="str", required=True), state=dict(type="str", choices=["present", "absent"], default="present"), @@ -717,10 +680,7 @@ def main(): type="dict", options=dict( in_cluster=dict(type="bool", default=True), - client_broker=dict( - choices=["TLS", "TLS_PLAINTEXT", "PLAINTEXT"], - default="TLS" - ), + client_broker=dict(choices=["TLS", "TLS_PLAINTEXT", "PLAINTEXT"], default="TLS"), ), ), ), @@ -780,23 +740,21 @@ def main(): ), wait=dict(type="bool", default=False), wait_timeout=dict(type="int", default=3600), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=module_args, - required_if=[['state', 'present', ['version', 'configuration_arn', 'configuration_revision', 'subnets']]], - supports_check_mode=True + required_if=[["state", "present", ["version", "configuration_arn", "configuration_revision", "subnets"]]], + supports_check_mode=True, ) client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff()) if module.params["state"] == "present": if len(module.params["subnets"]) < 2: - module.fail_json( - msg="At least two client subnets should be provided" - ) + module.fail_json(msg="At least two client subnets should be provided") if int(module.params["nodes"]) % int(len(module.params["subnets"])) != 0: module.fail_json( msg="The number of broker nodes must be a multiple of availability zones in the subnets parameter" @@ -813,9 +771,7 @@ def main(): bootstrap_broker_string = {} if response.get("ClusterArn") and module.params["state"] == "present": try: - cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)[ - "ClusterInfo" - ] + cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)["ClusterInfo"] if cluster_info.get("State") == "ACTIVE": brokers = client.get_bootstrap_brokers(ClusterArn=response["ClusterArn"], aws_retry=True) if brokers.get("BootstrapBrokerString"): @@ -828,9 +784,7 @@ def main(): ) as e: module.fail_json_aws( e, - "Can not obtain information about cluster {0}".format( - response["ClusterArn"] - ), + "Can not obtain information about cluster {0}".format(response["ClusterArn"]), ) module.exit_json( diff --git a/plugins/modules/msk_config.py b/plugins/modules/msk_config.py index 8dce485410f..5b67cd9924f 100644 --- a/plugins/modules/msk_config.py +++ b/plugins/modules/msk_config.py @@ -143,19 +143,13 @@ def find_active_config(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="failed to obtain kafka configurations") - active_configs = list( - item - for item in all_configs - if item["Name"] == name and item["State"] == "ACTIVE" - ) + active_configs = list(item for item in all_configs if item["Name"] == name and item["State"] == "ACTIVE") if active_configs: if len(active_configs) == 1: return active_configs[0] else: - module.fail_json_aws( - msg="found more than one active config with name '{0}'".format(name) - ) + module.fail_json_aws(msg="found more than one active config with name '{0}'".format(name)) return None @@ -192,7 +186,6 @@ def create_config(client, module): # create new configuration if not config: - if module.check_mode: return True, {} @@ -202,7 +195,7 @@ def create_config(client, module): Description=module.params.get("description"), KafkaVersions=module.params.get("kafka_versions"), ServerProperties=dict_to_prop(module.params.get("config")).encode(), - aws_retry=True + aws_retry=True, ) except ( botocore.exceptions.BotoCoreError, @@ -213,7 +206,9 @@ def create_config(client, module): # update existing configuration (creates new revision) else: # it's required because 'config' doesn't contain 'ServerProperties' - response = get_configuration_revision(client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"]) + response = get_configuration_revision( + client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"] + ) if not is_configuration_changed(module, response): return False, response @@ -226,7 +221,7 @@ def create_config(client, module): Arn=config["Arn"], Description=module.params.get("description"), ServerProperties=dict_to_prop(module.params.get("config")).encode(), - aws_retry=True + aws_retry=True, ) except ( botocore.exceptions.BotoCoreError, @@ -267,7 +262,6 @@ def delete_config(client, module): def main(): - module_args = dict( name=dict(type="str", required=True), description=dict(type="str", default=""), diff --git a/plugins/modules/networkfirewall.py b/plugins/modules/networkfirewall.py index 9460701cc9a..2cab7e26dfc 100644 --- a/plugins/modules/networkfirewall.py +++ b/plugins/modules/networkfirewall.py @@ -274,29 +274,28 @@ def main(): - argument_spec = dict( - name=dict(type='str', required=False, aliases=['firewall_name']), - arn=dict(type='str', required=False, aliases=['firewall_arn']), - state=dict(type='str', required=False, default='present', choices=['present', 'absent']), - description=dict(type='str', required=False), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), - subnet_change_protection=dict(type='bool', required=False), - policy_change_protection=dict(type='bool', required=False, aliases=['firewall_policy_change_protection']), - delete_protection=dict(type='bool', required=False), - subnets=dict(type='list', elements='str', required=False), - purge_subnets=dict(type='bool', required=False, default=True), - policy=dict(type='str', required=False, aliases=['firewall_policy_arn']), + name=dict(type="str", required=False, aliases=["firewall_name"]), + arn=dict(type="str", required=False, aliases=["firewall_arn"]), + state=dict(type="str", required=False, default="present", choices=["present", "absent"]), + description=dict(type="str", required=False), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), + subnet_change_protection=dict(type="bool", required=False), + policy_change_protection=dict(type="bool", required=False, aliases=["firewall_policy_change_protection"]), + delete_protection=dict(type="bool", required=False), + subnets=dict(type="list", elements="str", required=False), + purge_subnets=dict(type="bool", required=False, default=True), + policy=dict(type="str", required=False, aliases=["firewall_policy_arn"]), ) mutually_exclusive = [ - ('arn', 'name',) + ["arn", "name"], ] required_one_of = [ - ('arn', 'name',) + ["arn", "name"], ] module = AnsibleAWSModule( @@ -306,30 +305,30 @@ def main(): required_one_of=required_one_of, ) - arn = module.params.get('arn') - name = module.params.get('name') - state = module.params.get('state') + arn = module.params.get("arn") + name = module.params.get("name") + state = module.params.get("state") manager = NetworkFirewallManager(module, name=name, arn=arn) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - if state == 'absent': - manager.set_delete_protection(module.params.get('delete_protection', None)) + if state == "absent": + manager.set_delete_protection(module.params.get("delete_protection", None)) manager.delete() else: if not manager.original_resource: - if not module.params.get('subnets', None): - module.fail_json('The subnets parameter must be provided on creation.') - if not module.params.get('policy', None): - module.fail_json('The policy parameter must be provided on creation.') - manager.set_description(module.params.get('description', None)) - manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None)) - manager.set_subnet_change_protection(module.params.get('subnet_change_protection', None)) - manager.set_policy_change_protection(module.params.get('policy_change_protection', None)) - manager.set_delete_protection(module.params.get('delete_protection', None)) - manager.set_subnets(module.params.get('subnets', None), module.params.get('purge_subnets', None)) - manager.set_policy(module.params.get('policy', None)) + if not module.params.get("subnets", None): + module.fail_json("The subnets parameter must be provided on creation.") + if not module.params.get("policy", None): + module.fail_json("The policy parameter must be provided on creation.") + manager.set_description(module.params.get("description", None)) + manager.set_tags(module.params.get("tags", None), module.params.get("purge_tags", None)) + manager.set_subnet_change_protection(module.params.get("subnet_change_protection", None)) + manager.set_policy_change_protection(module.params.get("policy_change_protection", None)) + manager.set_delete_protection(module.params.get("delete_protection", None)) + manager.set_subnets(module.params.get("subnets", None), module.params.get("purge_subnets", None)) + manager.set_policy(module.params.get("policy", None)) manager.flush_changes() results = dict( @@ -341,9 +340,9 @@ def main(): before=manager.original_resource, after=manager.updated_resource, ) - results['diff'] = diff + results["diff"] = diff module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/networkfirewall_info.py b/plugins/modules/networkfirewall_info.py index 70395f75d9e..262a31067b8 100644 --- a/plugins/modules/networkfirewall_info.py +++ b/plugins/modules/networkfirewall_info.py @@ -190,24 +190,23 @@ def main(): - argument_spec = dict( - name=dict(type='str', required=False), - arn=dict(type='str', required=False), - vpc_ids=dict(type='list', required=False, elements='str', aliases=['vpcs', 'vpc_id']), + name=dict(type="str", required=False), + arn=dict(type="str", required=False), + vpc_ids=dict(type="list", required=False, elements="str", aliases=["vpcs", "vpc_id"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('arn', 'name', 'vpc_ids',), + ["arn", "name", "vpc_ids"], ], ) - arn = module.params.get('arn') - name = module.params.get('name') - vpcs = module.params.get('vpc_ids') + arn = module.params.get("arn") + name = module.params.get("name") + vpcs = module.params.get("vpc_ids") manager = NetworkFirewallManager(module) @@ -216,20 +215,20 @@ def main(): if name or arn: firewall = manager.get_firewall(name=name, arn=arn) if firewall: - results['firewalls'] = [firewall] + results["firewalls"] = [firewall] else: - results['firewalls'] = [] + results["firewalls"] = [] else: if vpcs: firewall_list = manager.list(vpc_ids=vpcs) else: firewall_list = manager.list() - results['firewall_list'] = firewall_list + results["firewall_list"] = firewall_list firewalls = [manager.get_firewall(arn=f) for f in firewall_list] - results['firewalls'] = firewalls + results["firewalls"] = firewalls module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/networkfirewall_policy.py b/plugins/modules/networkfirewall_policy.py index 61affcbc9ab..a1d389fe732 100644 --- a/plugins/modules/networkfirewall_policy.py +++ b/plugins/modules/networkfirewall_policy.py @@ -340,40 +340,46 @@ def main(): - custom_action_options = dict( - name=dict(type='str', required=True), + name=dict(type="str", required=True), # Poorly documented, but "publishMetricAction.dimensions ... must have length less than or equal to 1" - publish_metric_dimension_value=dict(type='str', required=False, aliases=['publish_metric_dimension_values']), + publish_metric_dimension_value=dict(type="str", required=False, aliases=["publish_metric_dimension_values"]), # NetworkFirewallPolicyManager can cope with a list for future-proofing # publish_metric_dimension_values=dict(type='list', elements='str', required=False, aliases=['publish_metric_dimension_value']), ) argument_spec = dict( - name=dict(type='str', required=False), - arn=dict(type='str', required=False), - state=dict(type='str', required=False, default='present', choices=['present', 'absent']), - description=dict(type='str', required=False), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - stateful_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateful_groups']), - stateless_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateless_groups']), - stateful_default_actions=dict(type='list', elements='str', required=False), - stateless_default_actions=dict(type='list', elements='str', required=False), - stateless_fragment_default_actions=dict(type='list', elements='str', required=False), - stateful_rule_order=dict(type='str', required=False, choices=['strict', 'default'], aliases=['rule_order']), - stateless_custom_actions=dict(type='list', elements='dict', required=False, - options=custom_action_options, aliases=['custom_stateless_actions']), - purge_stateless_custom_actions=dict(type='bool', required=False, default=True, aliases=['purge_custom_stateless_actions']), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), + name=dict(type="str", required=False), + arn=dict(type="str", required=False), + state=dict(type="str", required=False, default="present", choices=["present", "absent"]), + description=dict(type="str", required=False), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + stateful_rule_groups=dict(type="list", elements="str", required=False, aliases=["stateful_groups"]), + stateless_rule_groups=dict(type="list", elements="str", required=False, aliases=["stateless_groups"]), + stateful_default_actions=dict(type="list", elements="str", required=False), + stateless_default_actions=dict(type="list", elements="str", required=False), + stateless_fragment_default_actions=dict(type="list", elements="str", required=False), + stateful_rule_order=dict(type="str", required=False, choices=["strict", "default"], aliases=["rule_order"]), + stateless_custom_actions=dict( + type="list", + elements="dict", + required=False, + options=custom_action_options, + aliases=["custom_stateless_actions"], + ), + purge_stateless_custom_actions=dict( + type="bool", required=False, default=True, aliases=["purge_custom_stateless_actions"] + ), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), ) mutually_exclusive = [ - ('arn', 'name',) + ["arn", "name"], ] required_one_of = [ - ('arn', 'name',) + ["arn", "name"], ] module = AnsibleAWSModule( @@ -383,36 +389,36 @@ def main(): required_one_of=required_one_of, ) - arn = module.params.get('arn') - name = module.params.get('name') - state = module.params.get('state') + arn = module.params.get("arn") + name = module.params.get("name") + state = module.params.get("state") manager = NetworkFirewallPolicyManager(module, name=name, arn=arn) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - rule_order = module.params.get('stateful_rule_order') + rule_order = module.params.get("stateful_rule_order") if rule_order and rule_order != "default": - module.require_botocore_at_least('1.21.52', reason='to set the rule order') - if module.params.get('stateful_default_actions'): - module.require_botocore_at_least( - '1.21.52', reason='to set the default actions for stateful flows') + module.require_botocore_at_least("1.21.52", reason="to set the rule order") + if module.params.get("stateful_default_actions"): + module.require_botocore_at_least("1.21.52", reason="to set the default actions for stateful flows") - if state == 'absent': + if state == "absent": manager.delete() else: - manager.set_description(module.params.get('description', None)) - manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None)) + manager.set_description(module.params.get("description", None)) + manager.set_tags(module.params.get("tags", None), module.params.get("purge_tags", None)) # Actions need to be defined before potentially consuming them manager.set_custom_stateless_actions( - module.params.get('stateless_custom_actions', None), - module.params.get('purge_stateless_custom_actions', True)), - manager.set_stateful_rule_order(module.params.get('stateful_rule_order', None)) - manager.set_stateful_rule_groups(module.params.get('stateful_rule_groups', None)) - manager.set_stateless_rule_groups(module.params.get('stateless_rule_groups', None)) - manager.set_stateful_default_actions(module.params.get('stateful_default_actions', None)) - manager.set_stateless_default_actions(module.params.get('stateless_default_actions', None)) - manager.set_stateless_fragment_default_actions(module.params.get('stateless_fragment_default_actions', None)) + module.params.get("stateless_custom_actions", None), + module.params.get("purge_stateless_custom_actions", True), + ), + manager.set_stateful_rule_order(module.params.get("stateful_rule_order", None)) + manager.set_stateful_rule_groups(module.params.get("stateful_rule_groups", None)) + manager.set_stateless_rule_groups(module.params.get("stateless_rule_groups", None)) + manager.set_stateful_default_actions(module.params.get("stateful_default_actions", None)) + manager.set_stateless_default_actions(module.params.get("stateless_default_actions", None)) + manager.set_stateless_fragment_default_actions(module.params.get("stateless_fragment_default_actions", None)) manager.flush_changes() @@ -425,9 +431,9 @@ def main(): before=manager.original_resource, after=manager.updated_resource, ) - results['diff'] = diff + results["diff"] = diff module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/networkfirewall_policy_info.py b/plugins/modules/networkfirewall_policy_info.py index 9f0de62e119..3bb92174513 100644 --- a/plugins/modules/networkfirewall_policy_info.py +++ b/plugins/modules/networkfirewall_policy_info.py @@ -218,22 +218,21 @@ def main(): - argument_spec = dict( - name=dict(type='str', required=False), - arn=dict(type='str', required=False), + name=dict(type="str", required=False), + arn=dict(type="str", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('arn', 'name',), + ["arn", "name"], ], ) - arn = module.params.get('arn') - name = module.params.get('name') + arn = module.params.get("arn") + name = module.params.get("name") manager = NetworkFirewallPolicyManager(module) @@ -242,17 +241,17 @@ def main(): if name or arn: policy = manager.get_policy(name=name, arn=arn) if policy: - results['policies'] = [policy] + results["policies"] = [policy] else: - results['policies'] = [] + results["policies"] = [] else: policy_list = manager.list() - results['policy_list'] = policy_list + results["policy_list"] = policy_list policies = [manager.get_policy(arn=p) for p in policy_list] - results['policies'] = policies + results["policies"] = policies module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/networkfirewall_rule_group.py b/plugins/modules/networkfirewall_rule_group.py index 2a10b6f4e69..a7800568619 100644 --- a/plugins/modules/networkfirewall_rule_group.py +++ b/plugins/modules/networkfirewall_rule_group.py @@ -712,101 +712,102 @@ def main(): - domain_list_spec = dict( - domain_names=dict(type='list', elements='str', required=True), - filter_http=dict(type='bool', required=False, default=False), - filter_https=dict(type='bool', required=False, default=False), - action=dict(type='str', required=True, choices=['allow', 'deny']), - source_ips=dict(type='list', elements='str', required=False), + domain_names=dict(type="list", elements="str", required=True), + filter_http=dict(type="bool", required=False, default=False), + filter_https=dict(type="bool", required=False, default=False), + action=dict(type="str", required=True, choices=["allow", "deny"]), + source_ips=dict(type="list", elements="str", required=False), ) rule_list_spec = dict( - action=dict(type='str', required=True, choices=['pass', 'drop', 'alert']), - protocol=dict(type='str', required=True), - source=dict(type='str', required=True), - source_port=dict(type='str', required=True), - direction=dict(type='str', required=False, default='forward', choices=['forward', 'any']), - destination=dict(type='str', required=True), - destination_port=dict(type='str', required=True), - sid=dict(type='int', required=True), - rule_options=dict(type='dict', required=False), + action=dict(type="str", required=True, choices=["pass", "drop", "alert"]), + protocol=dict(type="str", required=True), + source=dict(type="str", required=True), + source_port=dict(type="str", required=True), + direction=dict(type="str", required=False, default="forward", choices=["forward", "any"]), + destination=dict(type="str", required=True), + destination_port=dict(type="str", required=True), + sid=dict(type="int", required=True), + rule_options=dict(type="dict", required=False), ) argument_spec = dict( - arn=dict(type='str', required=False), - name=dict(type='str', required=False), - rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateful']), + arn=dict(type="str", required=False), + name=dict(type="str", required=False), + rule_type=dict(type="str", required=False, aliases=["type"], choices=["stateful"]), # rule_type=dict(type='str', required=True, aliases=['type'], choices=['stateless', 'stateful']), - state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), - capacity=dict(type='int', required=False), - rule_order=dict(type='str', required=False, aliases=['stateful_rule_order'], choices=['default', 'strict']), - description=dict(type='str', required=False), - ip_variables=dict(type='dict', required=False, aliases=['ip_set_variables']), - purge_ip_variables=dict(type='bool', required=False, aliases=['purge_ip_set_variables'], default=True), - port_variables=dict(type='dict', required=False, aliases=['port_set_variables']), - purge_port_variables=dict(type='bool', required=False, aliases=['purge_port_set_variables'], default=True), - rule_strings=dict(type='list', elements='str', required=False), - domain_list=dict(type='dict', options=domain_list_spec, required=False), - rule_list=dict(type='list', elements='dict', aliases=['stateful_rule_list'], options=rule_list_spec, required=False), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), + state=dict(type="str", required=False, choices=["present", "absent"], default="present"), + capacity=dict(type="int", required=False), + rule_order=dict(type="str", required=False, aliases=["stateful_rule_order"], choices=["default", "strict"]), + description=dict(type="str", required=False), + ip_variables=dict(type="dict", required=False, aliases=["ip_set_variables"]), + purge_ip_variables=dict(type="bool", required=False, aliases=["purge_ip_set_variables"], default=True), + port_variables=dict(type="dict", required=False, aliases=["port_set_variables"]), + purge_port_variables=dict(type="bool", required=False, aliases=["purge_port_set_variables"], default=True), + rule_strings=dict(type="list", elements="str", required=False), + domain_list=dict(type="dict", options=domain_list_spec, required=False), + rule_list=dict( + type="list", elements="dict", aliases=["stateful_rule_list"], options=rule_list_spec, required=False + ), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('name', 'arn'), - ('rule_strings', 'domain_list', 'rule_list'), - ('domain_list', 'ip_variables'), + ["name", "arn"], + ["rule_strings", "domain_list", "rule_list"], + ["domain_list", "ip_variables"], ], required_together=[ - ('name', 'rule_type'), + ["name", "rule_type"], ], required_one_of=[ - ('name', 'arn'), + ["name", "arn"], ], ) - module.require_botocore_at_least('1.19.20') + module.require_botocore_at_least("1.19.20") - state = module.params.get('state') - name = module.params.get('name') - arn = module.params.get('arn') - rule_type = module.params.get('rule_type') + state = module.params.get("state") + name = module.params.get("name") + arn = module.params.get("arn") + rule_type = module.params.get("rule_type") - if rule_type == 'stateless': - if module.params.get('rule_order'): - module.fail_json('rule_order can not be set for stateless rule groups') - if module.params.get('rule_strings'): - module.fail_json('rule_strings can only be used for stateful rule groups') - if module.params.get('rule_list'): - module.fail_json('rule_list can only be used for stateful rule groups') - if module.params.get('domain_list'): - module.fail_json('domain_list can only be used for stateful rule groups') + if rule_type == "stateless": + if module.params.get("rule_order"): + module.fail_json("rule_order can not be set for stateless rule groups") + if module.params.get("rule_strings"): + module.fail_json("rule_strings can only be used for stateful rule groups") + if module.params.get("rule_list"): + module.fail_json("rule_list can only be used for stateful rule groups") + if module.params.get("domain_list"): + module.fail_json("domain_list can only be used for stateful rule groups") - if module.params.get('rule_order'): - module.require_botocore_at_least('1.23.23', reason='to set the rule order') + if module.params.get("rule_order"): + module.require_botocore_at_least("1.23.23", reason="to set the rule order") manager = NetworkFirewallRuleManager(module, arn=arn, name=name, rule_type=rule_type) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - if state == 'absent': + if state == "absent": manager.delete() else: - manager.set_description(module.params.get('description')) - manager.set_capacity(module.params.get('capacity')) - manager.set_rule_order(module.params.get('rule_order')) - manager.set_ip_variables(module.params.get('ip_variables'), module.params.get('purge_ip_variables')) - manager.set_port_variables(module.params.get('port_variables'), module.params.get('purge_port_variables')) - manager.set_rule_string(module.params.get('rule_strings')) - manager.set_domain_list(module.params.get('domain_list')) - manager.set_rule_list(module.params.get('rule_list')) - manager.set_tags(module.params.get('tags'), module.params.get('purge_tags')) + manager.set_description(module.params.get("description")) + manager.set_capacity(module.params.get("capacity")) + manager.set_rule_order(module.params.get("rule_order")) + manager.set_ip_variables(module.params.get("ip_variables"), module.params.get("purge_ip_variables")) + manager.set_port_variables(module.params.get("port_variables"), module.params.get("purge_port_variables")) + manager.set_rule_string(module.params.get("rule_strings")) + manager.set_domain_list(module.params.get("domain_list")) + manager.set_rule_list(module.params.get("rule_list")) + manager.set_tags(module.params.get("tags"), module.params.get("purge_tags")) manager.flush_changes() @@ -819,9 +820,9 @@ def main(): before=manager.original_resource, after=manager.updated_resource, ) - results['diff'] = diff + results["diff"] = diff module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/networkfirewall_rule_group_info.py b/plugins/modules/networkfirewall_rule_group_info.py index bcd83070b42..6d2dabe31c5 100644 --- a/plugins/modules/networkfirewall_rule_group_info.py +++ b/plugins/modules/networkfirewall_rule_group_info.py @@ -393,35 +393,34 @@ def main(): - argument_spec = dict( - name=dict(type='str', required=False), - rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateless', 'stateful']), - arn=dict(type='str', required=False), - scope=dict(type='str', required=False, choices=['managed', 'account']), + name=dict(type="str", required=False), + rule_type=dict(type="str", required=False, aliases=["type"], choices=["stateless", "stateful"]), + arn=dict(type="str", required=False), + scope=dict(type="str", required=False, choices=["managed", "account"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('arn', 'name',), - ('arn', 'rule_type'), + ["arn", "name"], + ["arn", "rule_type"], ], required_together=[ - ('name', 'rule_type'), - ] + ["name", "rule_type"], + ], ) - module.require_botocore_at_least('1.19.20') + module.require_botocore_at_least("1.19.20") - arn = module.params.get('arn') - name = module.params.get('name') - rule_type = module.params.get('rule_type') - scope = module.params.get('scope') + arn = module.params.get("arn") + name = module.params.get("name") + rule_type = module.params.get("rule_type") + scope = module.params.get("scope") - if module.params.get('scope') == 'managed': - module.require_botocore_at_least('1.23.23', reason='to list managed rules') + if module.params.get("scope") == "managed": + module.require_botocore_at_least("1.23.23", reason="to list managed rules") manager = NetworkFirewallRuleManager(module, name=name, rule_type=rule_type) @@ -430,18 +429,18 @@ def main(): if name or arn: rule = manager.get_rule_group(name=name, rule_type=rule_type, arn=arn) if rule: - results['rule_groups'] = [rule] + results["rule_groups"] = [rule] else: - results['rule_groups'] = [] + results["rule_groups"] = [] else: rule_list = manager.list(scope=scope) - results['rule_list'] = rule_list - if scope != 'managed': + results["rule_list"] = rule_list + if scope != "managed": rules = [manager.get_rule_group(arn=r) for r in rule_list] - results['rule_groups'] = rules + results["rule_groups"] = rules module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/opensearch.py b/plugins/modules/opensearch.py index e6635da499b..88055d1a6dd 100644 --- a/plugins/modules/opensearch.py +++ b/plugins/modules/opensearch.py @@ -514,16 +514,17 @@ def ensure_domain_absent(client, module): domain = get_domain_status(client, module, domain_name) if module.check_mode: - module.exit_json( - changed=True, msg="Would have deleted domain if not in check mode" - ) + module.exit_json(changed=True, msg="Would have deleted domain if not in check mode") try: client.delete_domain(DomainName=domain_name) changed = True except is_boto3_error_code("ResourceNotFoundException"): # The resource does not exist, or it has already been deleted return dict(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="trying to delete domain") # If we're not waiting for a delete to complete then we're all done @@ -535,7 +536,10 @@ def ensure_domain_absent(client, module): return dict(changed=changed) except is_boto3_error_code("ResourceNotFoundException"): return dict(changed=changed) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, "awaiting domain deletion") @@ -560,8 +564,11 @@ def upgrade_domain(client, module, source_version, target_engine_version): # It's not possible to upgrade directly to the target version. # Check the module parameters to determine if this is allowed or not. if not module.params.get("allow_intermediate_upgrades"): - module.fail_json(msg="Cannot upgrade from {0} to version {1}. The highest compatible version is {2}".format( - source_version, target_engine_version, next_version)) + module.fail_json( + msg="Cannot upgrade from {0} to version {1}. The highest compatible version is {2}".format( + source_version, target_engine_version, next_version + ) + ) parameters = { "DomainName": domain_name, @@ -584,9 +591,7 @@ def upgrade_domain(client, module, source_version, target_engine_version): # raised if it's not possible to upgrade to the target version. module.fail_json_aws( e, - msg="Couldn't upgrade domain {0} from {1} to {2}".format( - domain_name, current_version, next_version - ), + msg="Couldn't upgrade domain {0} from {1} to {2}".format(domain_name, current_version, next_version), ) if module.check_mode: @@ -602,9 +607,7 @@ def upgrade_domain(client, module, source_version, target_engine_version): wait_for_domain_status(client, module, domain_name, "domain_available") -def set_cluster_config( - module, current_domain_config, desired_domain_config, change_set -): +def set_cluster_config(module, current_domain_config, desired_domain_config, change_set): changed = False cluster_config = desired_domain_config["ClusterConfig"] @@ -619,24 +622,16 @@ def set_cluster_config( if cluster_config["ZoneAwarenessEnabled"]: if cluster_opts.get("availability_zone_count") is not None: cluster_config["ZoneAwarenessConfig"] = { - "AvailabilityZoneCount": cluster_opts.get( - "availability_zone_count" - ), + "AvailabilityZoneCount": cluster_opts.get("availability_zone_count"), } if cluster_opts.get("dedicated_master") is not None: - cluster_config["DedicatedMasterEnabled"] = cluster_opts.get( - "dedicated_master" - ) + cluster_config["DedicatedMasterEnabled"] = cluster_opts.get("dedicated_master") if cluster_config["DedicatedMasterEnabled"]: if cluster_opts.get("dedicated_master_instance_type") is not None: - cluster_config["DedicatedMasterType"] = cluster_opts.get( - "dedicated_master_instance_type" - ) + cluster_config["DedicatedMasterType"] = cluster_opts.get("dedicated_master_instance_type") if cluster_opts.get("dedicated_master_instance_count") is not None: - cluster_config["DedicatedMasterCount"] = cluster_opts.get( - "dedicated_master_instance_count" - ) + cluster_config["DedicatedMasterCount"] = cluster_opts.get("dedicated_master_instance_count") if cluster_opts.get("warm_enabled") is not None: cluster_config["WarmEnabled"] = cluster_opts.get("warm_enabled") @@ -657,31 +652,20 @@ def set_cluster_config( if cold_storage_opts is not None and cold_storage_opts.get("enabled"): module.fail_json(msg="Cold Storage is not supported") cluster_config.pop("ColdStorageOptions", None) - if ( - current_domain_config is not None - and "ClusterConfig" in current_domain_config - ): + if current_domain_config is not None and "ClusterConfig" in current_domain_config: # Remove 'ColdStorageOptions' from the current domain config, otherwise the actual vs desired diff # will indicate a change must be done. current_domain_config["ClusterConfig"].pop("ColdStorageOptions", None) else: # Elasticsearch 7.9 and above support ColdStorageOptions. - if ( - cold_storage_opts is not None - and cold_storage_opts.get("enabled") is not None - ): + if cold_storage_opts is not None and cold_storage_opts.get("enabled") is not None: cluster_config["ColdStorageOptions"] = { "Enabled": cold_storage_opts.get("enabled"), } - if ( - current_domain_config is not None - and current_domain_config["ClusterConfig"] != cluster_config - ): + if current_domain_config is not None and current_domain_config["ClusterConfig"] != cluster_config: change_set.append( - "ClusterConfig changed from {0} to {1}".format( - current_domain_config["ClusterConfig"], cluster_config - ) + "ClusterConfig changed from {0} to {1}".format(current_domain_config["ClusterConfig"], cluster_config) ) changed = True return changed @@ -708,22 +692,13 @@ def set_ebs_options(module, current_domain_config, desired_domain_config, change if ebs_opts.get("iops") is not None: ebs_config["Iops"] = ebs_opts.get("iops") - if ( - current_domain_config is not None - and current_domain_config["EBSOptions"] != ebs_config - ): - change_set.append( - "EBSOptions changed from {0} to {1}".format( - current_domain_config["EBSOptions"], ebs_config - ) - ) + if current_domain_config is not None and current_domain_config["EBSOptions"] != ebs_config: + change_set.append("EBSOptions changed from {0} to {1}".format(current_domain_config["EBSOptions"], ebs_config)) changed = True return changed -def set_encryption_at_rest_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_encryption_at_rest_options(module, current_domain_config, desired_domain_config, change_set): changed = False encryption_at_rest_config = desired_domain_config["EncryptionAtRestOptions"] encryption_at_rest_opts = module.params.get("encryption_at_rest_options") @@ -737,14 +712,11 @@ def set_encryption_at_rest_options( } else: if encryption_at_rest_opts.get("kms_key_id") is not None: - encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get( - "kms_key_id" - ) + encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get("kms_key_id") if ( current_domain_config is not None - and current_domain_config["EncryptionAtRestOptions"] - != encryption_at_rest_config + and current_domain_config["EncryptionAtRestOptions"] != encryption_at_rest_config ): change_set.append( "EncryptionAtRestOptions changed from {0} to {1}".format( @@ -756,25 +728,18 @@ def set_encryption_at_rest_options( return changed -def set_node_to_node_encryption_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_node_to_node_encryption_options(module, current_domain_config, desired_domain_config, change_set): changed = False - node_to_node_encryption_config = desired_domain_config[ - "NodeToNodeEncryptionOptions" - ] + node_to_node_encryption_config = desired_domain_config["NodeToNodeEncryptionOptions"] node_to_node_encryption_opts = module.params.get("node_to_node_encryption_options") if node_to_node_encryption_opts is None: return changed if node_to_node_encryption_opts.get("enabled") is not None: - node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get( - "enabled" - ) + node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get("enabled") if ( current_domain_config is not None - and current_domain_config["NodeToNodeEncryptionOptions"] - != node_to_node_encryption_config + and current_domain_config["NodeToNodeEncryptionOptions"] != node_to_node_encryption_config ): change_set.append( "NodeToNodeEncryptionOptions changed from {0} to {1}".format( @@ -838,9 +803,7 @@ def set_vpc_options(module, current_domain_config, desired_domain_config, change pass else: # Note the subnets may be the same but be listed in a different order. - if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set( - vpc_config["SubnetIds"] - ): + if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set(vpc_config["SubnetIds"]): change_set.append( "SubnetIds changed from {0} to {1}".format( current_domain_config["VPCOptions"]["SubnetIds"], @@ -848,9 +811,7 @@ def set_vpc_options(module, current_domain_config, desired_domain_config, change ) ) changed = True - if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set( - vpc_config["SecurityGroupIds"] - ): + if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set(vpc_config["SecurityGroupIds"]): change_set.append( "SecurityGroup changed from {0} to {1}".format( current_domain_config["VPCOptions"]["SecurityGroupIds"], @@ -861,30 +822,21 @@ def set_vpc_options(module, current_domain_config, desired_domain_config, change return changed -def set_snapshot_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_snapshot_options(module, current_domain_config, desired_domain_config, change_set): changed = False snapshot_config = desired_domain_config["SnapshotOptions"] snapshot_opts = module.params.get("snapshot_options") if snapshot_opts is None: return changed if snapshot_opts.get("automated_snapshot_start_hour") is not None: - snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get( - "automated_snapshot_start_hour" - ) - if ( - current_domain_config is not None - and current_domain_config["SnapshotOptions"] != snapshot_config - ): + snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get("automated_snapshot_start_hour") + if current_domain_config is not None and current_domain_config["SnapshotOptions"] != snapshot_config: change_set.append("SnapshotOptions changed") changed = True return changed -def set_cognito_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_cognito_options(module, current_domain_config, desired_domain_config, change_set): changed = False cognito_config = desired_domain_config["CognitoOptions"] cognito_opts = module.params.get("cognito_options") @@ -900,28 +852,19 @@ def set_cognito_options( if cognito_opts.get("cognito_user_pool_id") is not None: cognito_config["UserPoolId"] = cognito_opts.get("cognito_user_pool_id") if cognito_opts.get("cognito_identity_pool_id") is not None: - cognito_config["IdentityPoolId"] = cognito_opts.get( - "cognito_identity_pool_id" - ) + cognito_config["IdentityPoolId"] = cognito_opts.get("cognito_identity_pool_id") if cognito_opts.get("cognito_role_arn") is not None: cognito_config["RoleArn"] = cognito_opts.get("cognito_role_arn") - if ( - current_domain_config is not None - and current_domain_config["CognitoOptions"] != cognito_config - ): + if current_domain_config is not None and current_domain_config["CognitoOptions"] != cognito_config: change_set.append( - "CognitoOptions changed from {0} to {1}".format( - current_domain_config["CognitoOptions"], cognito_config - ) + "CognitoOptions changed from {0} to {1}".format(current_domain_config["CognitoOptions"], cognito_config) ) changed = True return changed -def set_advanced_security_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_advanced_security_options(module, current_domain_config, desired_domain_config, change_set): changed = False advanced_security_config = desired_domain_config["AdvancedSecurityOptions"] advanced_security_opts = module.params.get("advanced_security_options") @@ -935,60 +878,44 @@ def set_advanced_security_options( } else: if advanced_security_opts.get("internal_user_database_enabled") is not None: - advanced_security_config[ - "InternalUserDatabaseEnabled" - ] = advanced_security_opts.get("internal_user_database_enabled") + advanced_security_config["InternalUserDatabaseEnabled"] = advanced_security_opts.get( + "internal_user_database_enabled" + ) master_user_opts = advanced_security_opts.get("master_user_options") if master_user_opts is not None: advanced_security_config.setdefault("MasterUserOptions", {}) if master_user_opts.get("master_user_arn") is not None: - advanced_security_config["MasterUserOptions"][ - "MasterUserARN" - ] = master_user_opts.get("master_user_arn") + advanced_security_config["MasterUserOptions"]["MasterUserARN"] = master_user_opts.get("master_user_arn") if master_user_opts.get("master_user_name") is not None: - advanced_security_config["MasterUserOptions"][ - "MasterUserName" - ] = master_user_opts.get("master_user_name") + advanced_security_config["MasterUserOptions"]["MasterUserName"] = master_user_opts.get( + "master_user_name" + ) if master_user_opts.get("master_user_password") is not None: - advanced_security_config["MasterUserOptions"][ - "MasterUserPassword" - ] = master_user_opts.get("master_user_password") + advanced_security_config["MasterUserOptions"]["MasterUserPassword"] = master_user_opts.get( + "master_user_password" + ) saml_opts = advanced_security_opts.get("saml_options") if saml_opts is not None: if saml_opts.get("enabled") is not None: - advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get( - "enabled" - ) + advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get("enabled") idp_opts = saml_opts.get("idp") if idp_opts is not None: if idp_opts.get("metadata_content") is not None: - advanced_security_config["SamlOptions"]["Idp"][ - "MetadataContent" - ] = idp_opts.get("metadata_content") + advanced_security_config["SamlOptions"]["Idp"]["MetadataContent"] = idp_opts.get("metadata_content") if idp_opts.get("entity_id") is not None: - advanced_security_config["SamlOptions"]["Idp"][ - "EntityId" - ] = idp_opts.get("entity_id") + advanced_security_config["SamlOptions"]["Idp"]["EntityId"] = idp_opts.get("entity_id") if saml_opts.get("master_user_name") is not None: - advanced_security_config["SamlOptions"][ - "MasterUserName" - ] = saml_opts.get("master_user_name") + advanced_security_config["SamlOptions"]["MasterUserName"] = saml_opts.get("master_user_name") if saml_opts.get("master_backend_role") is not None: - advanced_security_config["SamlOptions"][ - "MasterBackendRole" - ] = saml_opts.get("master_backend_role") + advanced_security_config["SamlOptions"]["MasterBackendRole"] = saml_opts.get("master_backend_role") if saml_opts.get("subject_key") is not None: - advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get( - "subject_key" - ) + advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get("subject_key") if saml_opts.get("roles_key") is not None: - advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get( - "roles_key" - ) + advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get("roles_key") if saml_opts.get("session_timeout_minutes") is not None: - advanced_security_config["SamlOptions"][ - "SessionTimeoutMinutes" - ] = saml_opts.get("session_timeout_minutes") + advanced_security_config["SamlOptions"]["SessionTimeoutMinutes"] = saml_opts.get( + "session_timeout_minutes" + ) if ( current_domain_config is not None @@ -1004,40 +931,27 @@ def set_advanced_security_options( return changed -def set_domain_endpoint_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_domain_endpoint_options(module, current_domain_config, desired_domain_config, change_set): changed = False domain_endpoint_config = desired_domain_config["DomainEndpointOptions"] domain_endpoint_opts = module.params.get("domain_endpoint_options") if domain_endpoint_opts is None: return changed if domain_endpoint_opts.get("enforce_https") is not None: - domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get( - "enforce_https" - ) + domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get("enforce_https") if domain_endpoint_opts.get("tls_security_policy") is not None: - domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get( - "tls_security_policy" - ) + domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get("tls_security_policy") if domain_endpoint_opts.get("custom_endpoint_enabled") is not None: - domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get( - "custom_endpoint_enabled" - ) + domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get("custom_endpoint_enabled") if domain_endpoint_config["CustomEndpointEnabled"]: if domain_endpoint_opts.get("custom_endpoint") is not None: - domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get( - "custom_endpoint" - ) + domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get("custom_endpoint") if domain_endpoint_opts.get("custom_endpoint_certificate_arn") is not None: - domain_endpoint_config[ - "CustomEndpointCertificateArn" - ] = domain_endpoint_opts.get("custom_endpoint_certificate_arn") + domain_endpoint_config["CustomEndpointCertificateArn"] = domain_endpoint_opts.get( + "custom_endpoint_certificate_arn" + ) - if ( - current_domain_config is not None - and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config - ): + if current_domain_config is not None and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config: change_set.append( "DomainEndpointOptions changed from {0} to {1}".format( current_domain_config["DomainEndpointOptions"], domain_endpoint_config @@ -1047,9 +961,7 @@ def set_domain_endpoint_options( return changed -def set_auto_tune_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_auto_tune_options(module, current_domain_config, desired_domain_config, change_set): changed = False auto_tune_config = desired_domain_config["AutoTuneOptions"] auto_tune_opts = module.params.get("auto_tune_options") @@ -1080,15 +992,10 @@ def set_auto_tune_options( if duration_opt.get("unit") is not None: schedule_entry["Duration"]["Unit"] = duration_opt.get("unit") if s.get("cron_expression_for_recurrence") is not None: - schedule_entry["CronExpressionForRecurrence"] = s.get( - "cron_expression_for_recurrence" - ) + schedule_entry["CronExpressionForRecurrence"] = s.get("cron_expression_for_recurrence") auto_tune_config["MaintenanceSchedules"].append(schedule_entry) if current_domain_config is not None: - if ( - current_domain_config["AutoTuneOptions"]["DesiredState"] - != auto_tune_config["DesiredState"] - ): + if current_domain_config["AutoTuneOptions"]["DesiredState"] != auto_tune_config["DesiredState"]: change_set.append( "AutoTuneOptions.DesiredState changed from {0} to {1}".format( current_domain_config["AutoTuneOptions"]["DesiredState"], @@ -1096,10 +1003,7 @@ def set_auto_tune_options( ) ) changed = True - if ( - auto_tune_config["MaintenanceSchedules"] - != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"] - ): + if auto_tune_config["MaintenanceSchedules"] != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"]: change_set.append( "AutoTuneOptions.MaintenanceSchedules changed from {0} to {1}".format( current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"], @@ -1119,18 +1023,12 @@ def set_access_policy(module, current_domain_config, desired_domain_config, chan try: access_policy_config = json.dumps(access_policy_opt) except Exception as e: - module.fail_json( - msg="Failed to convert the policy into valid JSON: %s" % str(e) - ) + module.fail_json(msg="Failed to convert the policy into valid JSON: %s" % str(e)) if current_domain_config is not None: # Updating existing domain current_access_policy = json.loads(current_domain_config["AccessPolicies"]) if not compare_policies(current_access_policy, access_policy_opt): - change_set.append( - "AccessPolicy changed from {0} to {1}".format( - current_access_policy, access_policy_opt - ) - ) + change_set.append("AccessPolicy changed from {0} to {1}".format(current_access_policy, access_policy_opt)) changed = True desired_domain_config["AccessPolicies"] = access_policy_config else: @@ -1193,53 +1091,26 @@ def ensure_domain_present(client, module): # Validate the engine_version v = parse_version(module.params.get("engine_version")) if v is None: - module.fail_json( - "Invalid engine_version. Must be Elasticsearch_X.Y or OpenSearch_X.Y" - ) + module.fail_json("Invalid engine_version. Must be Elasticsearch_X.Y or OpenSearch_X.Y") desired_domain_config["EngineVersion"] = module.params.get("engine_version") changed = False change_set = [] # For check mode purpose - changed |= set_cluster_config( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_ebs_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_encryption_at_rest_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_node_to_node_encryption_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_vpc_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_snapshot_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_cognito_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_advanced_security_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_domain_endpoint_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_auto_tune_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_access_policy( - module, current_domain_config, desired_domain_config, change_set - ) + changed |= set_cluster_config(module, current_domain_config, desired_domain_config, change_set) + changed |= set_ebs_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_encryption_at_rest_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_node_to_node_encryption_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_vpc_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_snapshot_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_cognito_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_advanced_security_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_domain_endpoint_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_auto_tune_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_access_policy(module, current_domain_config, desired_domain_config, change_set) if current_domain_config is not None: - if ( - desired_domain_config["EngineVersion"] - != current_domain_config["EngineVersion"] - ): + if desired_domain_config["EngineVersion"] != current_domain_config["EngineVersion"]: changed = True change_set.append("EngineVersion changed") upgrade_domain( @@ -1263,22 +1134,16 @@ def ensure_domain_present(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws( - e, msg="Couldn't update domain {0}".format(domain_name) - ) + module.fail_json_aws(e, msg="Couldn't update domain {0}".format(domain_name)) else: # Create new OpenSearch cluster if module.params.get("access_policies") is None: - module.fail_json( - "state is present but the following is missing: access_policies" - ) + module.fail_json("state is present but the following is missing: access_policies") changed = True if module.check_mode: - module.exit_json( - changed=True, msg="Would have created a domain if not in check mode" - ) + module.exit_json(changed=True, msg="Would have created a domain if not in check mode") try: response = client.create_domain(**desired_domain_config) domain = response["DomainStatus"] @@ -1287,22 +1152,16 @@ def ensure_domain_present(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws( - e, msg="Couldn't update domain {0}".format(domain_name) - ) + module.fail_json_aws(e, msg="Couldn't update domain {0}".format(domain_name)) try: - existing_tags = boto3_tag_list_to_ansible_dict( - client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"] - ) + existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get tags for domain %s" % domain_name) desired_tags = module.params["tags"] purge_tags = module.params["purge_tags"] - changed |= ensure_tags( - client, module, domain_arn, existing_tags, desired_tags, purge_tags - ) + changed |= ensure_tags(client, module, domain_arn, existing_tags, desired_tags, purge_tags) if module.params.get("wait") and not module.check_mode: wait_for_domain_status(client, module, domain_name, "domain_available") @@ -1313,7 +1172,6 @@ def ensure_domain_present(client, module): def main(): - module = AnsibleAWSModule( argument_spec=dict( state=dict(choices=["present", "absent"], default="present"), diff --git a/plugins/modules/opensearch_info.py b/plugins/modules/opensearch_info.py index 7d6d8bb94ac..ef49637f5cc 100644 --- a/plugins/modules/opensearch_info.py +++ b/plugins/modules/opensearch_info.py @@ -456,49 +456,52 @@ def domain_info(client, module): - domain_name = module.params.get('domain_name') - filter_tags = module.params.get('tags') + domain_name = module.params.get("domain_name") + filter_tags = module.params.get("tags") domain_list = [] if domain_name: domain_status = get_domain_status(client, module, domain_name) if domain_status: - domain_list.append({'DomainStatus': domain_status}) + domain_list.append({"DomainStatus": domain_status}) else: - domain_summary_list = client.list_domain_names()['DomainNames'] + domain_summary_list = client.list_domain_names()["DomainNames"] for d in domain_summary_list: - domain_status = get_domain_status(client, module, d['DomainName']) + domain_status = get_domain_status(client, module, d["DomainName"]) if domain_status: - domain_list.append({'DomainStatus': domain_status}) + domain_list.append({"DomainStatus": domain_status}) # Get the domain tags for domain in domain_list: current_domain_tags = None - domain_arn = domain['DomainStatus']['ARN'] + domain_arn = domain["DomainStatus"]["ARN"] try: current_domain_tags = client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"] - domain['Tags'] = boto3_tag_list_to_ansible_dict(current_domain_tags) + domain["Tags"] = boto3_tag_list_to_ansible_dict(current_domain_tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # This could potentially happen if a domain is deleted between the time # its domain status was queried and the tags were queried. - domain['Tags'] = {} + domain["Tags"] = {} # Filter by tags if filter_tags: for tag_key in filter_tags: try: - domain_list = [c for c in domain_list if ('Tags' in c) and (tag_key in c['Tags']) and (c['Tags'][tag_key] == filter_tags[tag_key])] + domain_list = [ + c + for c in domain_list + if ("Tags" in c) and (tag_key in c["Tags"]) and (c["Tags"][tag_key] == filter_tags[tag_key]) + ] except (TypeError, AttributeError) as e: module.fail_json(msg="OpenSearch tag filtering error", exception=e) # Get the domain config for idx, domain in enumerate(domain_list): - domain_name = domain['DomainStatus']['DomainName'] + domain_name = domain["DomainStatus"]["DomainName"] (domain_config, arn) = get_domain_config(client, module, domain_name) if domain_config: - domain['DomainConfig'] = domain_config - domain_list[idx] = camel_dict_to_snake_dict(domain, - ignore_list=['AdvancedOptions', 'Endpoints', 'Tags']) + domain["DomainConfig"] = domain_config + domain_list[idx] = camel_dict_to_snake_dict(domain, ignore_list=["AdvancedOptions", "Endpoints", "Tags"]) return dict(changed=False, domains=domain_list) @@ -507,7 +510,7 @@ def main(): module = AnsibleAWSModule( argument_spec=dict( domain_name=dict(required=False), - tags=dict(type='dict', required=False), + tags=dict(type="dict", required=False), ), supports_check_mode=True, ) @@ -521,5 +524,5 @@ def main(): module.exit_json(**domain_info(client, module)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redshift.py b/plugins/modules/redshift.py index e0efbefa02a..61b9e3aeb4a 100644 --- a/plugins/modules/redshift.py +++ b/plugins/modules/redshift.py @@ -276,10 +276,10 @@ def _ensure_tags(redshift, identifier, existing_tags, module): """Compares and update resource tags""" account_id = get_aws_account_id(module) - region = module.params.get('region') - resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}" .format(region, account_id, identifier) - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') + region = module.params.get("region") + resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}".format(region, account_id, identifier) + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") tags_to_add, tags_to_remove = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), tags, purge_tags) @@ -302,78 +302,77 @@ def _ensure_tags(redshift, identifier, existing_tags, module): def _collect_facts(resource): """Transform cluster information to dict.""" facts = { - 'identifier': resource['ClusterIdentifier'], - 'status': resource['ClusterStatus'], - 'username': resource['MasterUsername'], - 'db_name': resource['DBName'], - 'maintenance_window': resource['PreferredMaintenanceWindow'], - 'enhanced_vpc_routing': resource['EnhancedVpcRouting'] - + "identifier": resource["ClusterIdentifier"], + "status": resource["ClusterStatus"], + "username": resource["MasterUsername"], + "db_name": resource["DBName"], + "maintenance_window": resource["PreferredMaintenanceWindow"], + "enhanced_vpc_routing": resource["EnhancedVpcRouting"], } - for node in resource['ClusterNodes']: - if node['NodeRole'] in ('SHARED', 'LEADER'): - facts['private_ip_address'] = node['PrivateIPAddress'] - if facts['enhanced_vpc_routing'] is False: - facts['public_ip_address'] = node['PublicIPAddress'] + for node in resource["ClusterNodes"]: + if node["NodeRole"] in ("SHARED", "LEADER"): + facts["private_ip_address"] = node["PrivateIPAddress"] + if facts["enhanced_vpc_routing"] is False: + facts["public_ip_address"] = node["PublicIPAddress"] else: - facts['public_ip_address'] = None + facts["public_ip_address"] = None break # Some parameters are not ready instantly if you don't wait for available # cluster status - facts['create_time'] = None - facts['url'] = None - facts['port'] = None - facts['availability_zone'] = None - facts['tags'] = {} - - if resource['ClusterStatus'] != "creating": - facts['create_time'] = resource['ClusterCreateTime'] - facts['url'] = resource['Endpoint']['Address'] - facts['port'] = resource['Endpoint']['Port'] - facts['availability_zone'] = resource['AvailabilityZone'] - facts['tags'] = boto3_tag_list_to_ansible_dict(resource['Tags']) + facts["create_time"] = None + facts["url"] = None + facts["port"] = None + facts["availability_zone"] = None + facts["tags"] = {} + + if resource["ClusterStatus"] != "creating": + facts["create_time"] = resource["ClusterCreateTime"] + facts["url"] = resource["Endpoint"]["Address"] + facts["port"] = resource["Endpoint"]["Port"] + facts["availability_zone"] = resource["AvailabilityZone"] + facts["tags"] = boto3_tag_list_to_ansible_dict(resource["Tags"]) return facts @AWSRetry.jittered_backoff() def _describe_cluster(redshift, identifier): - ''' + """ Basic wrapper around describe_clusters with a retry applied - ''' - return redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] + """ + return redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0] @AWSRetry.jittered_backoff() def _create_cluster(redshift, **kwargs): - ''' + """ Basic wrapper around create_cluster with a retry applied - ''' + """ return redshift.create_cluster(**kwargs) # Simple wrapper around delete, try to avoid throwing an error if some other # operation is in progress -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"]) def _delete_cluster(redshift, **kwargs): - ''' + """ Basic wrapper around delete_cluster with a retry applied. Explicitly catches 'InvalidClusterState' (~ Operation in progress) so that we can still delete a cluster if some kind of change operation was in progress. - ''' + """ return redshift.delete_cluster(**kwargs) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"]) def _modify_cluster(redshift, **kwargs): - ''' + """ Basic wrapper around modify_cluster with a retry applied. Explicitly catches 'InvalidClusterState' (~ Operation in progress) for cases where another modification is still in progress - ''' + """ return redshift.modify_cluster(**kwargs) @@ -387,59 +386,71 @@ def create_cluster(module, redshift): Returns: """ - identifier = module.params.get('identifier') - node_type = module.params.get('node_type') - username = module.params.get('username') - password = module.params.get('password') - d_b_name = module.params.get('db_name') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - tags = module.params.get('tags') + identifier = module.params.get("identifier") + node_type = module.params.get("node_type") + username = module.params.get("username") + password = module.params.get("password") + d_b_name = module.params.get("db_name") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + tags = module.params.get("tags") changed = True # Package up the optional parameters params = {} - for p in ('cluster_type', 'cluster_security_groups', - 'vpc_security_group_ids', 'cluster_subnet_group_name', - 'availability_zone', 'preferred_maintenance_window', - 'cluster_parameter_group_name', - 'automated_snapshot_retention_period', 'port', - 'cluster_version', 'allow_version_upgrade', - 'number_of_nodes', 'publicly_accessible', 'encrypted', - 'elastic_ip', 'enhanced_vpc_routing'): + for p in ( + "cluster_type", + "cluster_security_groups", + "vpc_security_group_ids", + "cluster_subnet_group_name", + "availability_zone", + "preferred_maintenance_window", + "cluster_parameter_group_name", + "automated_snapshot_retention_period", + "port", + "cluster_version", + "allow_version_upgrade", + "number_of_nodes", + "publicly_accessible", + "encrypted", + "elastic_ip", + "enhanced_vpc_routing", + ): # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: params[p] = module.params.get(p) if d_b_name: - params['d_b_name'] = d_b_name + params["d_b_name"] = d_b_name if tags: tags = ansible_dict_to_boto3_tag_list(tags) - params['tags'] = tags + params["tags"] = tags try: _describe_cluster(redshift, identifier) changed = False - except is_boto3_error_code('ClusterNotFound'): + except is_boto3_error_code("ClusterNotFound"): try: - _create_cluster(redshift, - ClusterIdentifier=identifier, - NodeType=node_type, - MasterUsername=username, - MasterUserPassword=password, - **snake_dict_to_camel_dict(params, capitalize_first=True)) + _create_cluster( + redshift, + ClusterIdentifier=identifier, + NodeType=node_type, + MasterUsername=username, + MasterUserPassword=password, + **snake_dict_to_camel_dict(params, capitalize_first=True), + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to create cluster") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe cluster") if wait: attempts = wait_timeout // 60 - waiter = redshift.get_waiter('cluster_available') + waiter = redshift.get_waiter("cluster_available") try: - waiter.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts) - ) + waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout waiting for the cluster creation") try: @@ -448,7 +459,7 @@ def create_cluster(module, redshift): module.fail_json_aws(e, msg="Failed to describe cluster") if tags: - if _ensure_tags(redshift, identifier, resource['Tags'], module): + if _ensure_tags(redshift, identifier, resource["Tags"], module): changed = True resource = _describe_cluster(redshift, identifier) @@ -462,7 +473,7 @@ def describe_cluster(module, redshift): module: Ansible module object redshift: authenticated redshift connection object """ - identifier = module.params.get('identifier') + identifier = module.params.get("identifier") try: resource = _describe_cluster(redshift, identifier) @@ -480,13 +491,12 @@ def delete_cluster(module, redshift): redshift: authenticated redshift connection object """ - identifier = module.params.get('identifier') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + identifier = module.params.get("identifier") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") params = {} - for p in ('skip_final_cluster_snapshot', - 'final_cluster_snapshot_identifier'): + for p in ("skip_final_cluster_snapshot", "final_cluster_snapshot_identifier"): if p in module.params: # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: @@ -494,22 +504,21 @@ def delete_cluster(module, redshift): try: _delete_cluster( - redshift, - ClusterIdentifier=identifier, - **snake_dict_to_camel_dict(params, capitalize_first=True)) - except is_boto3_error_code('ClusterNotFound'): + redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) + ) + except is_boto3_error_code("ClusterNotFound"): return False, {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete cluster") if wait: attempts = wait_timeout // 60 - waiter = redshift.get_waiter('cluster_deleted') + waiter = redshift.get_waiter("cluster_deleted") try: - waiter.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts) - ) + waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout deleting the cluster") @@ -524,62 +533,63 @@ def modify_cluster(module, redshift): redshift: authenticated redshift connection object """ - identifier = module.params.get('identifier') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + identifier = module.params.get("identifier") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") # Package up the optional parameters params = {} - for p in ('cluster_type', 'cluster_security_groups', - 'vpc_security_group_ids', 'cluster_subnet_group_name', - 'availability_zone', 'preferred_maintenance_window', - 'cluster_parameter_group_name', - 'automated_snapshot_retention_period', 'port', 'cluster_version', - 'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'): + for p in ( + "cluster_type", + "cluster_security_groups", + "vpc_security_group_ids", + "cluster_subnet_group_name", + "availability_zone", + "preferred_maintenance_window", + "cluster_parameter_group_name", + "automated_snapshot_retention_period", + "port", + "cluster_version", + "allow_version_upgrade", + "number_of_nodes", + "new_cluster_identifier", + ): # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: params[p] = module.params.get(p) # enhanced_vpc_routing parameter change needs an exclusive request - if module.params.get('enhanced_vpc_routing') is not None: + if module.params.get("enhanced_vpc_routing") is not None: try: _modify_cluster( - redshift, - ClusterIdentifier=identifier, - EnhancedVpcRouting=module.params.get('enhanced_vpc_routing')) + redshift, ClusterIdentifier=identifier, EnhancedVpcRouting=module.params.get("enhanced_vpc_routing") + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) if wait: attempts = wait_timeout // 60 - waiter = redshift.get_waiter('cluster_available') + waiter = redshift.get_waiter("cluster_available") try: - waiter.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts)) + waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, - msg="Timeout waiting for cluster enhanced vpc routing modification") + module.fail_json_aws(e, msg="Timeout waiting for cluster enhanced vpc routing modification") # change the rest try: _modify_cluster( - redshift, - ClusterIdentifier=identifier, - **snake_dict_to_camel_dict(params, capitalize_first=True)) + redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) - if module.params.get('new_cluster_identifier'): - identifier = module.params.get('new_cluster_identifier') + if module.params.get("new_cluster_identifier"): + identifier = module.params.get("new_cluster_identifier") if wait: attempts = wait_timeout // 60 - waiter2 = redshift.get_waiter('cluster_available') + waiter2 = redshift.get_waiter("cluster_available") try: - waiter2.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts) - ) + waiter2.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout waiting for cluster modification") try: @@ -587,85 +597,96 @@ def modify_cluster(module, redshift): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) - if _ensure_tags(redshift, identifier, resource['Tags'], module): - resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] + if _ensure_tags(redshift, identifier, resource["Tags"], module): + resource = redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0] return True, _collect_facts(resource) def main(): argument_spec = dict( - command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True), + command=dict(choices=["create", "facts", "delete", "modify"], required=True), identifier=dict(required=True), - node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', - 'ds2.8xlarge', 'dc1.large', 'dc2.large', - 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', - 'dw2.large', 'dw2.8xlarge'], required=False), + node_type=dict( + choices=[ + "ds1.xlarge", + "ds1.8xlarge", + "ds2.xlarge", + "ds2.8xlarge", + "dc1.large", + "dc2.large", + "dc1.8xlarge", + "dw1.xlarge", + "dw1.8xlarge", + "dw2.large", + "dw2.8xlarge", + ], + required=False, + ), username=dict(required=False), password=dict(no_log=True, required=False), db_name=dict(required=False), - cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'), - cluster_security_groups=dict(aliases=['security_groups'], type='list', elements='str'), - vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list', elements='str'), - skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], - type='bool', default=False), - final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False), - cluster_subnet_group_name=dict(aliases=['subnet']), - availability_zone=dict(aliases=['aws_zone', 'zone']), - preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']), - cluster_parameter_group_name=dict(aliases=['param_group_name']), - automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'), - port=dict(type='int'), - cluster_version=dict(aliases=['version'], choices=['1.0']), - allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True), - number_of_nodes=dict(type='int'), - publicly_accessible=dict(type='bool', default=False), - encrypted=dict(type='bool', default=False), + cluster_type=dict(choices=["multi-node", "single-node"], default="single-node"), + cluster_security_groups=dict(aliases=["security_groups"], type="list", elements="str"), + vpc_security_group_ids=dict(aliases=["vpc_security_groups"], type="list", elements="str"), + skip_final_cluster_snapshot=dict(aliases=["skip_final_snapshot"], type="bool", default=False), + final_cluster_snapshot_identifier=dict(aliases=["final_snapshot_id"], required=False), + cluster_subnet_group_name=dict(aliases=["subnet"]), + availability_zone=dict(aliases=["aws_zone", "zone"]), + preferred_maintenance_window=dict(aliases=["maintance_window", "maint_window"]), + cluster_parameter_group_name=dict(aliases=["param_group_name"]), + automated_snapshot_retention_period=dict(aliases=["retention_period"], type="int"), + port=dict(type="int"), + cluster_version=dict(aliases=["version"], choices=["1.0"]), + allow_version_upgrade=dict(aliases=["version_upgrade"], type="bool", default=True), + number_of_nodes=dict(type="int"), + publicly_accessible=dict(type="bool", default=False), + encrypted=dict(type="bool", default=False), elastic_ip=dict(required=False), - new_cluster_identifier=dict(aliases=['new_identifier']), - enhanced_vpc_routing=dict(type='bool', default=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True) + new_cluster_identifier=dict(aliases=["new_identifier"]), + enhanced_vpc_routing=dict(type="bool", default=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) required_if = [ - ('command', 'delete', ['skip_final_cluster_snapshot']), - ('command', 'create', ['node_type', - 'username', - 'password']) + ("command", "delete", ["skip_final_cluster_snapshot"]), + ("command", "create", ["node_type", "username", "password"]), ] module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=required_if + required_if=required_if, ) - command = module.params.get('command') - skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot') - final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier') + command = module.params.get("command") + skip_final_cluster_snapshot = module.params.get("skip_final_cluster_snapshot") + final_cluster_snapshot_identifier = module.params.get("final_cluster_snapshot_identifier") # can't use module basic required_if check for this case - if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None: - module.fail_json(msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False") + if command == "delete" and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None: + module.fail_json( + msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False" + ) - conn = module.client('redshift') + conn = module.client("redshift") changed = True - if command == 'create': + if command == "create": (changed, cluster) = create_cluster(module, conn) - elif command == 'facts': + elif command == "facts": (changed, cluster) = describe_cluster(module, conn) - elif command == 'delete': + elif command == "delete": (changed, cluster) = delete_cluster(module, conn) - elif command == 'modify': + elif command == "modify": (changed, cluster) = modify_cluster(module, conn) module.exit_json(changed=changed, cluster=cluster) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redshift_cross_region_snapshots.py b/plugins/modules/redshift_cross_region_snapshots.py index f8e0970f65c..f4d895cb1cb 100644 --- a/plugins/modules/redshift_cross_region_snapshots.py +++ b/plugins/modules/redshift_cross_region_snapshots.py @@ -89,16 +89,13 @@ class SnapshotController(object): - def __init__(self, client, cluster_name): self.client = client self.cluster_name = cluster_name def get_cluster_snapshot_copy_status(self): - response = self.client.describe_clusters( - ClusterIdentifier=self.cluster_name - ) - return response['Clusters'][0].get('ClusterSnapshotCopyStatus') + response = self.client.describe_clusters(ClusterIdentifier=self.cluster_name) + return response["Clusters"][0].get("ClusterSnapshotCopyStatus") def enable_snapshot_copy(self, destination_region, grant_name, retention_period): if grant_name: @@ -116,78 +113,80 @@ def enable_snapshot_copy(self, destination_region, grant_name, retention_period) ) def disable_snapshot_copy(self): - self.client.disable_snapshot_copy( - ClusterIdentifier=self.cluster_name - ) + self.client.disable_snapshot_copy(ClusterIdentifier=self.cluster_name) def modify_snapshot_copy_retention_period(self, retention_period): self.client.modify_snapshot_copy_retention_period( - ClusterIdentifier=self.cluster_name, - RetentionPeriod=retention_period + ClusterIdentifier=self.cluster_name, RetentionPeriod=retention_period ) def requesting_unsupported_modifications(actual, requested): - if (actual['SnapshotCopyGrantName'] != requested['snapshot_copy_grant'] or - actual['DestinationRegion'] != requested['destination_region']): + if ( + actual["SnapshotCopyGrantName"] != requested["snapshot_copy_grant"] + or actual["DestinationRegion"] != requested["destination_region"] + ): return True return False def needs_update(actual, requested): - if actual['RetentionPeriod'] != requested['snapshot_retention_period']: + if actual["RetentionPeriod"] != requested["snapshot_retention_period"]: return True return False def run_module(): argument_spec = dict( - cluster_name=dict(type='str', required=True, aliases=['cluster']), - state=dict(type='str', choices=['present', 'absent'], default='present'), - region=dict(type='str', required=True, aliases=['source']), - destination_region=dict(type='str', required=True, aliases=['destination']), - snapshot_copy_grant=dict(type='str', aliases=['copy_grant']), - snapshot_retention_period=dict(type='int', required=True, aliases=['retention_period']), + cluster_name=dict(type="str", required=True, aliases=["cluster"]), + state=dict(type="str", choices=["present", "absent"], default="present"), + region=dict(type="str", required=True, aliases=["source"]), + destination_region=dict(type="str", required=True, aliases=["destination"]), + snapshot_copy_grant=dict(type="str", aliases=["copy_grant"]), + snapshot_retention_period=dict(type="int", required=True, aliases=["retention_period"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) result = dict( changed=False, - message='' + message="", ) - connection = module.client('redshift') + connection = module.client("redshift") - snapshot_controller = SnapshotController(client=connection, - cluster_name=module.params.get('cluster_name')) + snapshot_controller = SnapshotController(client=connection, cluster_name=module.params.get("cluster_name")) current_config = snapshot_controller.get_cluster_snapshot_copy_status() if current_config is not None: - if module.params.get('state') == 'present': + if module.params.get("state") == "present": if requesting_unsupported_modifications(current_config, module.params): - message = 'Cannot modify destination_region or grant_name. ' \ - 'Please disable cross-region snapshots, and re-run.' + message = ( + "Cannot modify destination_region or grant_name. " + "Please disable cross-region snapshots, and re-run." + ) module.fail_json(msg=message, **result) if needs_update(current_config, module.params): - result['changed'] = True + result["changed"] = True if not module.check_mode: snapshot_controller.modify_snapshot_copy_retention_period( - module.params.get('snapshot_retention_period') + module.params.get("snapshot_retention_period") ) else: - result['changed'] = True + result["changed"] = True if not module.check_mode: snapshot_controller.disable_snapshot_copy() else: - if module.params.get('state') == 'present': - result['changed'] = True + if module.params.get("state") == "present": + result["changed"] = True if not module.check_mode: - snapshot_controller.enable_snapshot_copy(module.params.get('destination_region'), - module.params.get('snapshot_copy_grant'), - module.params.get('snapshot_retention_period')) + snapshot_controller.enable_snapshot_copy( + module.params.get("destination_region"), + module.params.get("snapshot_copy_grant"), + module.params.get("snapshot_retention_period"), + ) module.exit_json(**result) @@ -195,5 +194,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redshift_info.py b/plugins/modules/redshift_info.py index 2093dd38ad5..2a346167e24 100644 --- a/plugins/modules/redshift_info.py +++ b/plugins/modules/redshift_info.py @@ -287,31 +287,29 @@ def match_tags(tags_to_match, cluster): for key, value in tags_to_match.items(): - for tag in cluster['Tags']: - if key == tag['Key'] and value == tag['Value']: + for tag in cluster["Tags"]: + if key == tag["Key"] and value == tag["Value"]: return True return False def find_clusters(conn, module, identifier=None, tags=None): - try: - cluster_paginator = conn.get_paginator('describe_clusters') + cluster_paginator = conn.get_paginator("describe_clusters") clusters = cluster_paginator.paginate().build_full_result() except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to fetch clusters.') + module.fail_json_aws(e, msg="Failed to fetch clusters.") matched_clusters = [] if identifier is not None: - identifier_prog = re.compile('^' + identifier) - - for cluster in clusters['Clusters']: + identifier_prog = re.compile("^" + identifier) + for cluster in clusters["Clusters"]: matched_identifier = True if identifier: - matched_identifier = identifier_prog.search(cluster['ClusterIdentifier']) + matched_identifier = identifier_prog.search(cluster["ClusterIdentifier"]) matched_tags = True if tags: @@ -324,24 +322,23 @@ def find_clusters(conn, module, identifier=None, tags=None): def main(): - argument_spec = dict( - cluster_identifier=dict(type='str', aliases=['identifier', 'name']), - tags=dict(type='dict') + cluster_identifier=dict(type="str", aliases=["identifier", "name"]), + tags=dict(type="dict"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) - cluster_identifier = module.params.get('cluster_identifier') - cluster_tags = module.params.get('tags') + cluster_identifier = module.params.get("cluster_identifier") + cluster_tags = module.params.get("tags") - redshift = module.client('redshift') + redshift = module.client("redshift") results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags) module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redshift_subnet_group.py b/plugins/modules/redshift_subnet_group.py index 724c064cbe9..3d693cc23ac 100644 --- a/plugins/modules/redshift_subnet_group.py +++ b/plugins/modules/redshift_subnet_group.py @@ -110,10 +110,13 @@ def get_subnet_group(name): groups = client.describe_cluster_subnet_groups( aws_retry=True, ClusterSubnetGroupName=name, - )['ClusterSubnetGroups'] - except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'): + )["ClusterSubnetGroups"] + except is_boto3_error_code("ClusterSubnetGroupNotFoundFault"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe subnet group") if not groups: @@ -127,23 +130,22 @@ def get_subnet_group(name): # No support for managing tags yet, but make sure that we don't need to # change the return value structure after it's been available in a release. - tags = boto3_tag_list_to_ansible_dict(groups[0]['Tags']) + tags = boto3_tag_list_to_ansible_dict(groups[0]["Tags"]) subnet_group = camel_dict_to_snake_dict(groups[0]) - subnet_group['tags'] = tags - subnet_group['name'] = subnet_group['cluster_subnet_group_name'] + subnet_group["tags"] = tags + subnet_group["name"] = subnet_group["cluster_subnet_group_name"] - subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets']) - subnet_group['subnet_ids'] = subnet_ids + subnet_ids = list(s["subnet_identifier"] for s in subnet_group["subnets"]) + subnet_group["subnet_ids"] = subnet_ids return subnet_group def create_subnet_group(name, description, subnets): - if not subnets: - module.fail_json(msg='At least one subnet must be provided when creating a subnet group') + module.fail_json(msg="At least one subnet must be provided when creating a subnet group") if module.check_mode: return True @@ -164,13 +166,13 @@ def create_subnet_group(name, description, subnets): def update_subnet_group(subnet_group, name, description, subnets): update_params = dict() - if description and subnet_group['description'] != description: - update_params['Description'] = description + if description and subnet_group["description"] != description: + update_params["Description"] = description if subnets: - old_subnets = set(subnet_group['subnet_ids']) + old_subnets = set(subnet_group["subnet_ids"]) new_subnets = set(subnets) if old_subnets != new_subnets: - update_params['SubnetIds'] = list(subnets) + update_params["SubnetIds"] = list(subnets) if not update_params: return False @@ -179,8 +181,8 @@ def update_subnet_group(subnet_group, name, description, subnets): return True # Description is optional, SubnetIds is not - if 'SubnetIds' not in update_params: - update_params['SubnetIds'] = subnet_group['subnet_ids'] + if "SubnetIds" not in update_params: + update_params["SubnetIds"] = subnet_group["subnet_ids"] try: client.modify_cluster_subnet_group( @@ -195,7 +197,6 @@ def update_subnet_group(subnet_group, name, description, subnets): def delete_subnet_group(name): - if module.check_mode: return True @@ -205,20 +206,23 @@ def delete_subnet_group(name): ClusterSubnetGroupName=name, ) return True - except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'): + except is_boto3_error_code("ClusterSubnetGroupNotFoundFault"): # AWS is "eventually consistent", cope with the race conditions where # deletion hadn't completed when we ran describe return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete subnet group") def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - name=dict(required=True, aliases=['group_name']), - description=dict(required=False, aliases=['group_description']), - subnets=dict(required=False, aliases=['group_subnets'], type='list', elements='str'), + state=dict(default="present", choices=["present", "absent"]), + name=dict(required=True, aliases=["group_name"]), + description=dict(required=False, aliases=["group_description"]), + subnets=dict(required=False, aliases=["group_subnets"], type="list", elements="str"), ) global module @@ -229,17 +233,17 @@ def main(): supports_check_mode=True, ) - state = module.params.get('state') - name = module.params.get('name') - description = module.params.get('description') - subnets = module.params.get('subnets') + state = module.params.get("state") + name = module.params.get("name") + description = module.params.get("description") + subnets = module.params.get("subnets") - client = module.client('redshift', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("redshift", retry_decorator=AWSRetry.jittered_backoff()) subnet_group = get_subnet_group(name) changed = False - if state == 'present': + if state == "present": if not subnet_group: result = create_subnet_group(name, description, subnets) changed |= result @@ -255,9 +259,9 @@ def main(): compat_results = dict() if subnet_group: - compat_results['group'] = dict( - name=subnet_group['name'], - vpc_id=subnet_group['vpc_id'], + compat_results["group"] = dict( + name=subnet_group["name"], + vpc_id=subnet_group["vpc_id"], ) module.exit_json( @@ -267,5 +271,5 @@ def main(): ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/s3_bucket_info.py b/plugins/modules/s3_bucket_info.py index 20995539c45..ee4c0e2dd3f 100644 --- a/plugins/modules/s3_bucket_info.py +++ b/plugins/modules/s3_bucket_info.py @@ -424,18 +424,18 @@ def get_bucket_list(module, connection, name="", name_filter=""): # Get all buckets try: - buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets'] + buckets = camel_dict_to_snake_dict(connection.list_buckets())["buckets"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code: module.fail_json_aws(err_code, msg="Failed to list buckets") # Filter buckets if requested if name_filter: for bucket in buckets: - if name_filter in bucket['name']: + if name_filter in bucket["name"]: filtered_buckets.append(bucket) elif name: for bucket in buckets: - if name == bucket['name']: + if name == bucket["name"]: filtered_buckets.append(bucket) # Return proper list (filtered or all) @@ -453,7 +453,7 @@ def get_buckets_facts(connection, buckets, requested_facts, transform_location): full_bucket_list = [] # Iterate over all buckets and append retrived facts to bucket for bucket in buckets: - bucket.update(get_bucket_details(connection, bucket['name'], requested_facts, transform_location)) + bucket.update(get_bucket_details(connection, bucket["name"], requested_facts, transform_location)) full_bucket_list.append(bucket) return full_bucket_list @@ -467,14 +467,14 @@ def get_bucket_details(connection, name, requested_facts, transform_location): for key in requested_facts: if requested_facts[key]: - if key == 'bucket_location': + if key == "bucket_location": all_facts[key] = {} try: all_facts[key] = get_bucket_location(name, connection, transform_location) # we just pass on error - error means that resources is undefined except botocore.exceptions.ClientError: pass - elif key == 'bucket_tagging': + elif key == "bucket_tagging": all_facts[key] = {} try: all_facts[key] = get_bucket_tagging(name, connection) @@ -492,7 +492,7 @@ def get_bucket_details(connection, name, requested_facts, transform_location): return all_facts -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_location(name, connection, transform_location=False): """ Get bucket location and optionally transform 'null' to 'us-east-1' @@ -502,16 +502,16 @@ def get_bucket_location(name, connection, transform_location=False): # Replace 'null' with 'us-east-1'? if transform_location: try: - if not data['LocationConstraint']: - data['LocationConstraint'] = 'us-east-1' + if not data["LocationConstraint"]: + data["LocationConstraint"] = "us-east-1" except KeyError: pass # Strip response metadata (not needed) - data.pop('ResponseMetadata', None) + data.pop("ResponseMetadata", None) return data -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_tagging(name, connection): """ Get bucket tags and transform them using `boto3_tag_list_to_ansible_dict` function @@ -519,15 +519,15 @@ def get_bucket_tagging(name, connection): data = connection.get_bucket_tagging(Bucket=name) try: - bucket_tags = boto3_tag_list_to_ansible_dict(data['TagSet']) + bucket_tags = boto3_tag_list_to_ansible_dict(data["TagSet"]) return bucket_tags except KeyError: # Strip response metadata (not needed) - data.pop('ResponseMetadata', None) + data.pop("ResponseMetadata", None) return data -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_property(name, connection, get_api_name): """ Get bucket property @@ -537,7 +537,7 @@ def get_bucket_property(name, connection, get_api_name): data = api_function(Bucket=name) # Strip response metadata (not needed) - data.pop('ResponseMetadata', None) + data.pop("ResponseMetadata", None) return data @@ -547,27 +547,30 @@ def main(): :return: """ argument_spec = dict( - name=dict(type='str', default=""), - name_filter=dict(type='str', default=""), - bucket_facts=dict(type='dict', options=dict( - bucket_accelerate_configuration=dict(type='bool', default=False), - bucket_acl=dict(type='bool', default=False), - bucket_cors=dict(type='bool', default=False), - bucket_encryption=dict(type='bool', default=False), - bucket_lifecycle_configuration=dict(type='bool', default=False), - bucket_location=dict(type='bool', default=False), - bucket_logging=dict(type='bool', default=False), - bucket_notification_configuration=dict(type='bool', default=False), - bucket_ownership_controls=dict(type='bool', default=False), - bucket_policy=dict(type='bool', default=False), - bucket_policy_status=dict(type='bool', default=False), - bucket_replication=dict(type='bool', default=False), - bucket_request_payment=dict(type='bool', default=False), - bucket_tagging=dict(type='bool', default=False), - bucket_website=dict(type='bool', default=False), - public_access_block=dict(type='bool', default=False), - )), - transform_location=dict(type='bool', default=False) + name=dict(type="str", default=""), + name_filter=dict(type="str", default=""), + bucket_facts=dict( + type="dict", + options=dict( + bucket_accelerate_configuration=dict(type="bool", default=False), + bucket_acl=dict(type="bool", default=False), + bucket_cors=dict(type="bool", default=False), + bucket_encryption=dict(type="bool", default=False), + bucket_lifecycle_configuration=dict(type="bool", default=False), + bucket_location=dict(type="bool", default=False), + bucket_logging=dict(type="bool", default=False), + bucket_notification_configuration=dict(type="bool", default=False), + bucket_ownership_controls=dict(type="bool", default=False), + bucket_policy=dict(type="bool", default=False), + bucket_policy_status=dict(type="bool", default=False), + bucket_replication=dict(type="bool", default=False), + bucket_request_payment=dict(type="bool", default=False), + bucket_tagging=dict(type="bool", default=False), + bucket_website=dict(type="bool", default=False), + public_access_block=dict(type="bool", default=False), + ), + ), + transform_location=dict(type="bool", default=False), ) # Ensure we have an empty dict @@ -575,11 +578,15 @@ def main(): # Define mutually exclusive options mutually_exclusive = [ - ['name', 'name_filter'] + ["name", "name_filter"], ] # Including ec2 argument spec - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + ) # Get parameters name = module.params.get("name") @@ -590,29 +597,29 @@ def main(): # Set up connection connection = {} try: - connection = module.client('s3') + connection = module.client("s3") except (connection.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code: - module.fail_json_aws(err_code, msg='Failed to connect to AWS') + module.fail_json_aws(err_code, msg="Failed to connect to AWS") # Get basic bucket list (name + creation date) bucket_list = get_bucket_list(module, connection, name, name_filter) # Add information about name/name_filter to result if name: - result['bucket_name'] = name + result["bucket_name"] = name elif name_filter: - result['bucket_name_filter'] = name_filter + result["bucket_name_filter"] = name_filter # Gather detailed information about buckets if requested bucket_facts = module.params.get("bucket_facts") if bucket_facts: - result['buckets'] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location) + result["buckets"] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location) else: - result['buckets'] = bucket_list + result["buckets"] = bucket_list module.exit_json(msg="Retrieved s3 info.", **result) # MAIN -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/s3_bucket_notification.py b/plugins/modules/s3_bucket_notification.py index 0a8109b2adb..9ba6e5e6799 100644 --- a/plugins/modules/s3_bucket_notification.py +++ b/plugins/modules/s3_bucket_notification.py @@ -171,36 +171,33 @@ class AmazonBucket: def __init__(self, module, client): self.module = module self.client = client - self.bucket_name = module.params['bucket_name'] + self.bucket_name = module.params["bucket_name"] self.check_mode = module.check_mode self._full_config_cache = None def full_config(self): if self._full_config_cache is None: self._full_config_cache = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] + QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[] ) try: - config_lookup = self.client.get_bucket_notification_configuration( - Bucket=self.bucket_name) + config_lookup = self.client.get_bucket_notification_configuration(Bucket=self.bucket_name) except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg='{0}'.format(e)) + self.module.fail_json(msg="{0}".format(e)) # Handle different event targets - if config_lookup.get('QueueConfigurations'): - for queue_config in config_lookup.get('QueueConfigurations'): - self._full_config_cache['QueueConfigurations'].append(Config.from_api(queue_config)) + if config_lookup.get("QueueConfigurations"): + for queue_config in config_lookup.get("QueueConfigurations"): + self._full_config_cache["QueueConfigurations"].append(Config.from_api(queue_config)) - if config_lookup.get('TopicConfigurations'): - for topic_config in config_lookup.get('TopicConfigurations'): - self._full_config_cache['TopicConfigurations'].append(Config.from_api(topic_config)) + if config_lookup.get("TopicConfigurations"): + for topic_config in config_lookup.get("TopicConfigurations"): + self._full_config_cache["TopicConfigurations"].append(Config.from_api(topic_config)) - if config_lookup.get('LambdaFunctionConfigurations'): - for function_config in config_lookup.get('LambdaFunctionConfigurations'): - self._full_config_cache['LambdaFunctionConfigurations'].append(Config.from_api(function_config)) + if config_lookup.get("LambdaFunctionConfigurations"): + for function_config in config_lookup.get("LambdaFunctionConfigurations"): + self._full_config_cache["LambdaFunctionConfigurations"].append(Config.from_api(function_config)) return self._full_config_cache @@ -208,70 +205,59 @@ def current_config(self, config_name): # Iterate through configs and get current event config for target_configs in self.full_config(): for config in self.full_config()[target_configs]: - if config.raw['Id'] == config_name: + if config.raw["Id"] == config_name: return config def apply_config(self, desired): - configs = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] - ) + configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) # Iterate through existing configs then add the desired config for target_configs in self.full_config(): for config in self.full_config()[target_configs]: - if config.name != desired.raw['Id']: + if config.name != desired.raw["Id"]: configs[target_configs].append(config.raw) - if self.module.params.get('queue_arn'): - configs['QueueConfigurations'].append(desired.raw) - if self.module.params.get('topic_arn'): - configs['TopicConfigurations'].append(desired.raw) - if self.module.params.get('lambda_function_arn'): - configs['LambdaFunctionConfigurations'].append(desired.raw) + if self.module.params.get("queue_arn"): + configs["QueueConfigurations"].append(desired.raw) + if self.module.params.get("topic_arn"): + configs["TopicConfigurations"].append(desired.raw) + if self.module.params.get("lambda_function_arn"): + configs["LambdaFunctionConfigurations"].append(desired.raw) self._upload_bucket_config(configs) return configs def delete_config(self, desired): - configs = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] - ) + configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) # Iterate through existing configs omitting specified config for target_configs in self.full_config(): for config in self.full_config()[target_configs]: - if config.name != desired.raw['Id']: + if config.name != desired.raw["Id"]: configs[target_configs].append(config.raw) self._upload_bucket_config(configs) return configs def _upload_bucket_config(self, configs): - api_params = dict( - Bucket=self.bucket_name, - NotificationConfiguration=dict() - ) + api_params = dict(Bucket=self.bucket_name, NotificationConfiguration=dict()) # Iterate through available configs for target_configs in configs: if len(configs[target_configs]) > 0: - api_params['NotificationConfiguration'][target_configs] = configs[target_configs] + api_params["NotificationConfiguration"][target_configs] = configs[target_configs] if not self.check_mode: try: self.client.put_bucket_notification_configuration(**api_params) except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg='{0}'.format(e)) + self.module.fail_json(msg="{0}".format(e)) class Config: def __init__(self, content): self._content = content - self.name = content.get('Id') + self.name = content.get("Id") @property def raw(self): @@ -287,41 +273,35 @@ def from_params(cls, **params): """Generate bucket notification params for target""" bucket_event_params = dict( - Id=params['event_name'], - Events=sorted(params['events']), + Id=params["event_name"], + Events=sorted(params["events"]), Filter=dict( Key=dict( FilterRules=[ - dict( - Name='Prefix', - Value=params['prefix'] - ), - dict( - Name='Suffix', - Value=params['suffix'] - ) + dict(Name="Prefix", Value=params["prefix"]), + dict(Name="Suffix", Value=params["suffix"]), ] ) - ) + ), ) # Handle different event targets - if params.get('queue_arn'): - bucket_event_params['QueueArn'] = params['queue_arn'] - if params.get('topic_arn'): - bucket_event_params['TopicArn'] = params['topic_arn'] - if params.get('lambda_function_arn'): - function_arn = params['lambda_function_arn'] + if params.get("queue_arn"): + bucket_event_params["QueueArn"] = params["queue_arn"] + if params.get("topic_arn"): + bucket_event_params["TopicArn"] = params["topic_arn"] + if params.get("lambda_function_arn"): + function_arn = params["lambda_function_arn"] qualifier = None - if params['lambda_version'] > 0: - qualifier = str(params['lambda_version']) - elif params['lambda_alias']: - qualifier = str(params['lambda_alias']) + if params["lambda_version"] > 0: + qualifier = str(params["lambda_version"]) + elif params["lambda_alias"]: + qualifier = str(params["lambda_alias"]) if qualifier: - params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier) + params["lambda_function_arn"] = "{0}:{1}".format(function_arn, qualifier) - bucket_event_params['LambdaFunctionArn'] = params['lambda_function_arn'] + bucket_event_params["LambdaFunctionArn"] = params["lambda_function_arn"] return cls(bucket_event_params) @@ -331,66 +311,70 @@ def from_api(cls, config): def setup_module_object(): - event_types = ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post', - 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload', - 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete', - 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post', - 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject'] + event_types = [ + "s3:ObjectCreated:*", + "s3:ObjectCreated:Put", + "s3:ObjectCreated:Post", + "s3:ObjectCreated:Copy", + "s3:ObjectCreated:CompleteMultipartUpload", + "s3:ObjectRemoved:*", + "s3:ObjectRemoved:Delete", + "s3:ObjectRemoved:DeleteMarkerCreated", + "s3:ObjectRestore:Post", + "s3:ObjectRestore:Completed", + "s3:ReducedRedundancyLostObject", + ] argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), event_name=dict(required=True), - lambda_function_arn=dict(aliases=['function_arn']), - queue_arn=dict(type='str'), - topic_arn=dict(type='str'), + lambda_function_arn=dict(aliases=["function_arn"]), + queue_arn=dict(type="str"), + topic_arn=dict(type="str"), bucket_name=dict(required=True), - events=dict(type='list', default=[], choices=event_types, elements='str'), - prefix=dict(default=''), - suffix=dict(default=''), + events=dict(type="list", default=[], choices=event_types, elements="str"), + prefix=dict(default=""), + suffix=dict(default=""), lambda_alias=dict(), - lambda_version=dict(type='int', default=0), + lambda_version=dict(type="int", default=0), ) mutually_exclusive = [ - ['queue_arn', 'topic_arn', 'lambda_function_arn'], - ['lambda_alias', 'lambda_version'] + ["queue_arn", "topic_arn", "lambda_function_arn"], + ["lambda_alias", "lambda_version"], ] return AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive, - required_if=[['state', 'present', ['events']]] + required_if=[["state", "present", ["events"]]], ) def main(): module = setup_module_object() - client = module.client('s3') + client = module.client("s3") bucket = AmazonBucket(module, client) - current = bucket.current_config(module.params['event_name']) + current = bucket.current_config(module.params["event_name"]) desired = Config.from_params(**module.params) - notification_configs = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] - ) + notification_configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) for target_configs in bucket.full_config(): for cfg in bucket.full_config()[target_configs]: notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg.raw)) - state = module.params['state'] + state = module.params["state"] updated_configuration = dict() changed = False - if state == 'present': + if state == "present": if current != desired: updated_configuration = bucket.apply_config(desired) changed = True - elif state == 'absent': + elif state == "absent": if current: updated_configuration = bucket.delete_config(desired) changed = True @@ -400,9 +384,8 @@ def main(): for cfg in updated_configuration.get(target_configs, list()): notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg)) - module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict( - notification_configs)) + module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict(notification_configs)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/s3_cors.py b/plugins/modules/s3_cors.py index 797c8cc5050..0d92ba56eac 100644 --- a/plugins/modules/s3_cors.py +++ b/plugins/modules/s3_cors.py @@ -109,13 +109,12 @@ def create_or_update_bucket_cors(connection, module): - name = module.params.get("name") rules = module.params.get("rules", []) changed = False try: - current_camel_rules = connection.get_bucket_cors(Bucket=name)['CORSRules'] + current_camel_rules = connection.get_bucket_cors(Bucket=name)["CORSRules"] except ClientError: current_camel_rules = [] @@ -126,7 +125,7 @@ def create_or_update_bucket_cors(connection, module): if changed: try: - cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={'CORSRules': new_camel_rules}) + cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={"CORSRules": new_camel_rules}) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name)) @@ -134,7 +133,6 @@ def create_or_update_bucket_cors(connection, module): def destroy_bucket_cors(connection, module): - name = module.params.get("name") changed = False @@ -148,24 +146,23 @@ def destroy_bucket_cors(connection, module): def main(): - argument_spec = dict( - name=dict(required=True, type='str'), - rules=dict(type='list', elements='dict'), - state=dict(type='str', choices=['present', 'absent'], required=True) + name=dict(required=True, type="str"), + rules=dict(type="list", elements="dict"), + state=dict(type="str", choices=["present", "absent"], required=True), ) module = AnsibleAWSModule(argument_spec=argument_spec) - client = module.client('s3') + client = module.client("s3") state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_bucket_cors(client, module) - elif state == 'absent': + elif state == "absent": destroy_bucket_cors(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/s3_lifecycle.py b/plugins/modules/s3_lifecycle.py index 41d0de1d286..9d5ea3b8ff6 100644 --- a/plugins/modules/s3_lifecycle.py +++ b/plugins/modules/s3_lifecycle.py @@ -262,10 +262,13 @@ def fetch_rules(client, module, name): # Get the bucket's current lifecycle rules try: current_lifecycle = client.get_bucket_lifecycle_configuration(aws_retry=True, Bucket=name) - current_lifecycle_rules = normalize_boto3_result(current_lifecycle['Rules']) - except is_boto3_error_code('NoSuchLifecycleConfiguration'): + current_lifecycle_rules = normalize_boto3_result(current_lifecycle["Rules"]) + except is_boto3_error_code("NoSuchLifecycleConfiguration"): current_lifecycle_rules = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) return current_lifecycle_rules @@ -292,58 +295,63 @@ def build_rule(client, module): rule = dict(Filter=dict(Prefix=prefix), Status=status.title()) if rule_id is not None: - rule['ID'] = rule_id + rule["ID"] = rule_id if abort_incomplete_multipart_upload_days: - rule['AbortIncompleteMultipartUpload'] = { - 'DaysAfterInitiation': abort_incomplete_multipart_upload_days - } + rule["AbortIncompleteMultipartUpload"] = {"DaysAfterInitiation": abort_incomplete_multipart_upload_days} # Create expiration if expiration_days is not None: - rule['Expiration'] = dict(Days=expiration_days) + rule["Expiration"] = dict(Days=expiration_days) elif expiration_date is not None: - rule['Expiration'] = dict(Date=expiration_date.isoformat()) + rule["Expiration"] = dict(Date=expiration_date.isoformat()) elif expire_object_delete_marker is not None: - rule['Expiration'] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker) + rule["Expiration"] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker) if noncurrent_version_expiration_days or noncurrent_version_keep_newer: - rule['NoncurrentVersionExpiration'] = dict() + rule["NoncurrentVersionExpiration"] = dict() if noncurrent_version_expiration_days is not None: - rule['NoncurrentVersionExpiration']['NoncurrentDays'] = noncurrent_version_expiration_days + rule["NoncurrentVersionExpiration"]["NoncurrentDays"] = noncurrent_version_expiration_days if noncurrent_version_keep_newer is not None: - rule['NoncurrentVersionExpiration']['NewerNoncurrentVersions'] = noncurrent_version_keep_newer + rule["NoncurrentVersionExpiration"]["NewerNoncurrentVersions"] = noncurrent_version_keep_newer if transition_days is not None: - rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ] + rule["Transitions"] = [ + dict(Days=transition_days, StorageClass=storage_class.upper()), + ] elif transition_date is not None: - rule['Transitions'] = [dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()), ] + rule["Transitions"] = [ + dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()), + ] if transitions is not None: - if not rule.get('Transitions'): - rule['Transitions'] = [] + if not rule.get("Transitions"): + rule["Transitions"] = [] for transition in transitions: t_out = dict() - if transition.get('transition_date'): - t_out['Date'] = transition['transition_date'] - elif transition.get('transition_days') is not None: - t_out['Days'] = int(transition['transition_days']) - if transition.get('storage_class'): - t_out['StorageClass'] = transition['storage_class'].upper() - rule['Transitions'].append(t_out) + if transition.get("transition_date"): + t_out["Date"] = transition["transition_date"] + elif transition.get("transition_days") is not None: + t_out["Days"] = transition["transition_days"] + if transition.get("storage_class"): + t_out["StorageClass"] = transition["storage_class"].upper() + rule["Transitions"].append(t_out) if noncurrent_version_transition_days is not None: - rule['NoncurrentVersionTransitions'] = [dict(NoncurrentDays=noncurrent_version_transition_days, - StorageClass=noncurrent_version_storage_class.upper()), ] + rule["NoncurrentVersionTransitions"] = [ + dict( + NoncurrentDays=noncurrent_version_transition_days, StorageClass=noncurrent_version_storage_class.upper() + ), + ] if noncurrent_version_transitions is not None: - if not rule.get('NoncurrentVersionTransitions'): - rule['NoncurrentVersionTransitions'] = [] + if not rule.get("NoncurrentVersionTransitions"): + rule["NoncurrentVersionTransitions"] = [] for noncurrent_version_transition in noncurrent_version_transitions: t_out = dict() - t_out['NoncurrentDays'] = noncurrent_version_transition['transition_days'] - if noncurrent_version_transition.get('storage_class'): - t_out['StorageClass'] = noncurrent_version_transition['storage_class'].upper() - rule['NoncurrentVersionTransitions'].append(t_out) + t_out["NoncurrentDays"] = noncurrent_version_transition["transition_days"] + if noncurrent_version_transition.get("storage_class"): + t_out["StorageClass"] = noncurrent_version_transition["storage_class"].upper() + rule["NoncurrentVersionTransitions"].append(t_out) return rule @@ -360,23 +368,29 @@ def compare_and_update_configuration(client, module, current_lifecycle_rules, ru if current_lifecycle_rules: # If rule ID exists, use that for comparison otherwise compare based on prefix for existing_rule in current_lifecycle_rules: - if rule.get('ID') == existing_rule.get('ID') and rule['Filter'].get('Prefix', '') != existing_rule.get('Filter', {}).get('Prefix', ''): - existing_rule.pop('ID') - elif rule_id is None and rule['Filter'].get('Prefix', '') == existing_rule.get('Filter', {}).get('Prefix', ''): - existing_rule.pop('ID') - if rule.get('ID') == existing_rule.get('ID'): - changed_, appended_ = update_or_append_rule(rule, existing_rule, purge_transitions, lifecycle_configuration) + if rule.get("ID") == existing_rule.get("ID") and rule["Filter"].get("Prefix", "") != existing_rule.get( + "Filter", {} + ).get("Prefix", ""): + existing_rule.pop("ID") + elif rule_id is None and rule["Filter"].get("Prefix", "") == existing_rule.get("Filter", {}).get( + "Prefix", "" + ): + existing_rule.pop("ID") + if rule.get("ID") == existing_rule.get("ID"): + changed_, appended_ = update_or_append_rule( + rule, existing_rule, purge_transitions, lifecycle_configuration + ) changed = changed_ or changed appended = appended_ or appended else: - lifecycle_configuration['Rules'].append(existing_rule) + lifecycle_configuration["Rules"].append(existing_rule) # If nothing appended then append now as the rule must not exist if not appended: - lifecycle_configuration['Rules'].append(rule) + lifecycle_configuration["Rules"].append(rule) changed = True else: - lifecycle_configuration['Rules'].append(rule) + lifecycle_configuration["Rules"].append(rule) changed = True return changed, lifecycle_configuration @@ -384,24 +398,24 @@ def compare_and_update_configuration(client, module, current_lifecycle_rules, ru def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj): changed = False - if existing_rule['Status'] != new_rule['Status']: - if not new_rule.get('Transitions') and existing_rule.get('Transitions'): - new_rule['Transitions'] = existing_rule['Transitions'] - if not new_rule.get('Expiration') and existing_rule.get('Expiration'): - new_rule['Expiration'] = existing_rule['Expiration'] - if not new_rule.get('NoncurrentVersionExpiration') and existing_rule.get('NoncurrentVersionExpiration'): - new_rule['NoncurrentVersionExpiration'] = existing_rule['NoncurrentVersionExpiration'] - lifecycle_obj['Rules'].append(new_rule) + if existing_rule["Status"] != new_rule["Status"]: + if not new_rule.get("Transitions") and existing_rule.get("Transitions"): + new_rule["Transitions"] = existing_rule["Transitions"] + if not new_rule.get("Expiration") and existing_rule.get("Expiration"): + new_rule["Expiration"] = existing_rule["Expiration"] + if not new_rule.get("NoncurrentVersionExpiration") and existing_rule.get("NoncurrentVersionExpiration"): + new_rule["NoncurrentVersionExpiration"] = existing_rule["NoncurrentVersionExpiration"] + lifecycle_obj["Rules"].append(new_rule) changed = True appended = True else: if not purge_transitions: merge_transitions(new_rule, existing_rule) if compare_rule(new_rule, existing_rule, purge_transitions): - lifecycle_obj['Rules'].append(new_rule) + lifecycle_obj["Rules"].append(new_rule) appended = True else: - lifecycle_obj['Rules'].append(new_rule) + lifecycle_obj["Rules"].append(new_rule) changed = True appended = True return changed, appended @@ -415,24 +429,23 @@ def compare_and_remove_rule(current_lifecycle_rules, rule_id=None, prefix=None): # If an ID exists, use that otherwise compare based on prefix if rule_id is not None: for existing_rule in current_lifecycle_rules: - if rule_id == existing_rule['ID']: + if rule_id == existing_rule["ID"]: # We're not keeping the rule (i.e. deleting) so mark as changed changed = True else: - lifecycle_configuration['Rules'].append(existing_rule) + lifecycle_configuration["Rules"].append(existing_rule) else: for existing_rule in current_lifecycle_rules: - if prefix == existing_rule['Filter'].get('Prefix', ''): + if prefix == existing_rule["Filter"].get("Prefix", ""): # We're not keeping the rule (i.e. deleting) so mark as changed changed = True else: - lifecycle_configuration['Rules'].append(existing_rule) + lifecycle_configuration["Rules"].append(existing_rule) return changed, lifecycle_configuration def compare_rule(new_rule, old_rule, purge_transitions): - # Copy objects rule1 = deepcopy(new_rule) rule2 = deepcopy(old_rule) @@ -440,10 +453,10 @@ def compare_rule(new_rule, old_rule, purge_transitions): if purge_transitions: return rule1 == rule2 else: - transitions1 = rule1.pop('Transitions', []) - transitions2 = rule2.pop('Transitions', []) - noncurrent_transtions1 = rule1.pop('NoncurrentVersionTransitions', []) - noncurrent_transtions2 = rule2.pop('NoncurrentVersionTransitions', []) + transitions1 = rule1.pop("Transitions", []) + transitions2 = rule2.pop("Transitions", []) + noncurrent_transtions1 = rule1.pop("NoncurrentVersionTransitions", []) + noncurrent_transtions2 = rule2.pop("NoncurrentVersionTransitions", []) if rule1 != rule2: return False for transition in transitions1: @@ -461,38 +474,39 @@ def merge_transitions(updated_rule, updating_rule): # in updating_rule to updated_rule updated_transitions = {} updating_transitions = {} - for transition in updated_rule.get('Transitions', []): - updated_transitions[transition['StorageClass']] = transition - for transition in updating_rule.get('Transitions', []): - updating_transitions[transition['StorageClass']] = transition + for transition in updated_rule.get("Transitions", []): + updated_transitions[transition["StorageClass"]] = transition + for transition in updating_rule.get("Transitions", []): + updating_transitions[transition["StorageClass"]] = transition for storage_class, transition in updating_transitions.items(): if updated_transitions.get(storage_class) is None: - updated_rule['Transitions'].append(transition) + updated_rule["Transitions"].append(transition) def create_lifecycle_rule(client, module): - name = module.params.get("name") wait = module.params.get("wait") changed = False old_lifecycle_rules = fetch_rules(client, module, name) new_rule = build_rule(client, module) - (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, - old_lifecycle_rules, - new_rule) + (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule) if changed: # Write lifecycle to bucket try: client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_configuration) - except is_boto3_error_message('At least one action needs to be specified in a rule'): - # Amazon interpreted this as not changing anything + aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_configuration + ) + except is_boto3_error_message("At least one action needs to be specified in a rule"): + # Amazon interpretted this as not changing anything changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules + ) _changed = changed _retries = 10 @@ -505,9 +519,7 @@ def create_lifecycle_rule(client, module): time.sleep(5) _retries -= 1 new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, - new_rules, - new_rule) + (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, new_rules, new_rule) if not _changed: _not_changed_cnt -= 1 _changed = True @@ -518,13 +530,17 @@ def create_lifecycle_rule(client, module): new_rules = fetch_rules(client, module, name) - module.exit_json(changed=changed, new_rule=new_rule, rules=new_rules, - old_rules=old_lifecycle_rules, _retries=_retries, - _config=lifecycle_configuration) + module.exit_json( + changed=changed, + new_rule=new_rule, + rules=new_rules, + old_rules=old_lifecycle_rules, + _retries=_retries, + _config=lifecycle_configuration, + ) def destroy_lifecycle_rule(client, module): - name = module.params.get("name") prefix = module.params.get("prefix") rule_id = module.params.get("rule_id") @@ -540,11 +556,10 @@ def destroy_lifecycle_rule(client, module): if changed: # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration try: - if lifecycle_obj['Rules']: + if lifecycle_obj["Rules"]: client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_obj) + aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_obj + ) elif current_lifecycle_rules: changed = True client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) @@ -573,33 +588,32 @@ def destroy_lifecycle_rule(client, module): new_rules = fetch_rules(client, module, name) - module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules, - _retries=_retries) + module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules, _retries=_retries) def main(): - s3_storage_class = ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] + s3_storage_class = ["glacier", "onezone_ia", "standard_ia", "intelligent_tiering", "deep_archive"] argument_spec = dict( - name=dict(required=True, type='str'), - abort_incomplete_multipart_upload_days=dict(type='int'), - expiration_days=dict(type='int'), + name=dict(required=True, type="str"), + abort_incomplete_multipart_upload_days=dict(type="int"), + expiration_days=dict(type="int"), expiration_date=dict(), - expire_object_delete_marker=dict(type='bool'), - noncurrent_version_expiration_days=dict(type='int'), - noncurrent_version_keep_newer=dict(type='int'), - noncurrent_version_storage_class=dict(default='glacier', type='str', choices=s3_storage_class), - noncurrent_version_transition_days=dict(type='int'), - noncurrent_version_transitions=dict(type='list', elements='dict'), + expire_object_delete_marker=dict(type="bool"), + noncurrent_version_expiration_days=dict(type="int"), + noncurrent_version_keep_newer=dict(type="int"), + noncurrent_version_storage_class=dict(default="glacier", type="str", choices=s3_storage_class), + noncurrent_version_transition_days=dict(type="int"), + noncurrent_version_transitions=dict(type="list", elements="dict"), prefix=dict(), rule_id=dict(), - state=dict(default='present', choices=['present', 'absent']), - status=dict(default='enabled', choices=['enabled', 'disabled']), - storage_class=dict(default='glacier', type='str', choices=s3_storage_class), - transition_days=dict(type='int'), + state=dict(default="present", choices=["present", "absent"]), + status=dict(default="enabled", choices=["enabled", "disabled"]), + storage_class=dict(default="glacier", type="str", choices=s3_storage_class), + transition_days=dict(type="int"), transition_date=dict(), - transitions=dict(type='list', elements='dict'), - purge_transitions=dict(default=True, type='bool'), - wait=dict(type='bool', default=False) + transitions=dict(type="list", elements="dict"), + purge_transitions=dict(default=True, type="bool"), + wait=dict(type="bool", default=False), ) module = AnsibleAWSModule( @@ -618,7 +632,7 @@ def main(): }, ) - client = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("s3", retry_decorator=AWSRetry.jittered_backoff()) expiration_date = module.params.get("expiration_date") transition_date = module.params.get("transition_date") @@ -626,43 +640,51 @@ def main(): if module.params.get("noncurrent_version_keep_newer"): module.require_botocore_at_least( - "1.23.12", - reason="to set number of versions to keep with noncurrent_version_keep_newer" + "1.23.12", reason="to set number of versions to keep with noncurrent_version_keep_newer" ) - if state == 'present' and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix - - required_when_present = ('abort_incomplete_multipart_upload_days', - 'expiration_date', 'expiration_days', 'expire_object_delete_marker', - 'transition_date', 'transition_days', 'transitions', - 'noncurrent_version_expiration_days', - 'noncurrent_version_keep_newer', - 'noncurrent_version_transition_days', - 'noncurrent_version_transitions') + if state == "present" and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix + required_when_present = ( + "abort_incomplete_multipart_upload_days", + "expiration_date", + "expiration_days", + "expire_object_delete_marker", + "transition_date", + "transition_days", + "transitions", + "noncurrent_version_expiration_days", + "noncurrent_version_keep_newer", + "noncurrent_version_transition_days", + "noncurrent_version_transitions", + ) for param in required_when_present: if module.params.get(param) is None: break else: - msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present) + msg = "one of the following is required when 'state' is 'present': %s" % ", ".join(required_when_present) module.fail_json(msg=msg) # If dates have been set, make sure they're in a valid format if expiration_date: expiration_date = parse_date(expiration_date) if expiration_date is None: - module.fail_json(msg="expiration_date is not a valid ISO-8601 format." - " The time must be midnight and a timezone of GMT must be included") + module.fail_json( + msg="expiration_date is not a valid ISO-8601 format." + " The time must be midnight and a timezone of GMT must be included" + ) if transition_date: transition_date = parse_date(transition_date) if transition_date is None: - module.fail_json(msg="transition_date is not a valid ISO-8601 format." - " The time must be midnight and a timezone of GMT must be included") + module.fail_json( + msg="transition_date is not a valid ISO-8601 format." + " The time must be midnight and a timezone of GMT must be included" + ) - if state == 'present': + if state == "present": create_lifecycle_rule(client, module) - elif state == 'absent': + elif state == "absent": destroy_lifecycle_rule(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/s3_logging.py b/plugins/modules/s3_logging.py index 3db5fbf61e7..b2eda67d135 100644 --- a/plugins/modules/s3_logging.py +++ b/plugins/modules/s3_logging.py @@ -73,16 +73,15 @@ def compare_bucket_logging(bucket_logging, target_bucket, target_prefix): - - if not bucket_logging.get('LoggingEnabled', False): + if not bucket_logging.get("LoggingEnabled", False): if target_bucket: return True return False - logging = bucket_logging['LoggingEnabled'] - if logging['TargetBucket'] != target_bucket: + logging = bucket_logging["LoggingEnabled"] + if logging["TargetBucket"] != target_bucket: return True - if logging['TargetPrefix'] != target_prefix: + if logging["TargetPrefix"] != target_prefix: return True return False @@ -90,18 +89,18 @@ def compare_bucket_logging(bucket_logging, target_bucket, target_prefix): def verify_acls(connection, module, target_bucket): try: current_acl = connection.get_bucket_acl(aws_retry=True, Bucket=target_bucket) - current_grants = current_acl['Grants'] - except is_boto3_error_code('NoSuchBucket'): + current_grants = current_acl["Grants"] + except is_boto3_error_code("NoSuchBucket"): module.fail_json(msg="Target Bucket '{0}' not found".format(target_bucket)) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to fetch target bucket ACL") required_grant = { - 'Grantee': { - 'URI': "http://acs.amazonaws.com/groups/s3/LogDelivery", - 'Type': 'Group' - }, - 'Permission': 'FULL_CONTROL' + "Grantee": {"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", "Type": "Group"}, + "Permission": "FULL_CONTROL", } for grant in current_grants: @@ -114,8 +113,8 @@ def verify_acls(connection, module, target_bucket): updated_acl = dict(current_acl) updated_grants = list(current_grants) updated_grants.append(required_grant) - updated_acl['Grants'] = updated_grants - del updated_acl['ResponseMetadata'] + updated_acl["Grants"] = updated_grants + del updated_acl["ResponseMetadata"] try: connection.put_bucket_acl(aws_retry=True, Bucket=target_bucket, AccessControlPolicy=updated_acl) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -125,7 +124,6 @@ def verify_acls(connection, module, target_bucket): def enable_bucket_logging(connection, module): - bucket_name = module.params.get("name") target_bucket = module.params.get("target_bucket") target_prefix = module.params.get("target_prefix") @@ -133,9 +131,12 @@ def enable_bucket_logging(connection, module): try: bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name) - except is_boto3_error_code('NoSuchBucket'): + except is_boto3_error_code("NoSuchBucket"): module.fail_json(msg="Bucket '{0}' not found".format(bucket_name)) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to fetch current logging status") try: @@ -152,11 +153,12 @@ def enable_bucket_logging(connection, module): aws_retry=True, Bucket=bucket_name, BucketLoggingStatus={ - 'LoggingEnabled': { - 'TargetBucket': target_bucket, - 'TargetPrefix': target_prefix, + "LoggingEnabled": { + "TargetBucket": target_bucket, + "TargetPrefix": target_prefix, } - }) + }, + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to enable bucket logging") @@ -166,7 +168,6 @@ def enable_bucket_logging(connection, module): def disable_bucket_logging(connection, module): - bucket_name = module.params.get("name") changed = False @@ -182,11 +183,9 @@ def disable_bucket_logging(connection, module): module.exit_json(changed=True) try: - response = AWSRetry.jittered_backoff( - catch_extra_error_codes=['InvalidTargetBucketForLogging'] - )(connection.put_bucket_logging)( - Bucket=bucket_name, BucketLoggingStatus={} - ) + response = AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidTargetBucketForLogging"])( + connection.put_bucket_logging + )(Bucket=bucket_name, BucketLoggingStatus={}) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to disable bucket logging") @@ -194,24 +193,23 @@ def disable_bucket_logging(connection, module): def main(): - argument_spec = dict( name=dict(required=True), target_bucket=dict(required=False, default=None), target_prefix=dict(required=False, default=""), - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("s3", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": enable_bucket_logging(connection, module) - elif state == 'absent': + elif state == "absent": disable_bucket_logging(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/s3_metrics_configuration.py b/plugins/modules/s3_metrics_configuration.py index 333bb98cb67..90429ca64b4 100644 --- a/plugins/modules/s3_metrics_configuration.py +++ b/plugins/modules/s3_metrics_configuration.py @@ -110,41 +110,31 @@ def _create_metrics_configuration(mc_id, filter_prefix, filter_tags): - payload = { - 'Id': mc_id - } + payload = {"Id": mc_id} # Just a filter_prefix or just a single tag filter is a special case if filter_prefix and not filter_tags: - payload['Filter'] = { - 'Prefix': filter_prefix - } + payload["Filter"] = {"Prefix": filter_prefix} elif not filter_prefix and len(filter_tags) == 1: - payload['Filter'] = { - 'Tag': ansible_dict_to_boto3_tag_list(filter_tags)[0] - } + payload["Filter"] = {"Tag": ansible_dict_to_boto3_tag_list(filter_tags)[0]} # Otherwise we need to use 'And' elif filter_tags: - payload['Filter'] = { - 'And': { - 'Tags': ansible_dict_to_boto3_tag_list(filter_tags) - } - } + payload["Filter"] = {"And": {"Tags": ansible_dict_to_boto3_tag_list(filter_tags)}} if filter_prefix: - payload['Filter']['And']['Prefix'] = filter_prefix + payload["Filter"]["And"]["Prefix"] = filter_prefix return payload def create_or_update_metrics_configuration(client, module): - bucket_name = module.params.get('bucket_name') - mc_id = module.params.get('id') - filter_prefix = module.params.get('filter_prefix') - filter_tags = module.params.get('filter_tags') + bucket_name = module.params.get("bucket_name") + mc_id = module.params.get("id") + filter_prefix = module.params.get("filter_prefix") + filter_tags = module.params.get("filter_tags") try: response = client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - metrics_configuration = response['MetricsConfiguration'] - except is_boto3_error_code('NoSuchConfiguration'): + metrics_configuration = response["MetricsConfiguration"] + except is_boto3_error_code("NoSuchConfiguration"): metrics_configuration = None except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket metrics configuration") @@ -160,10 +150,7 @@ def create_or_update_metrics_configuration(client, module): try: client.put_bucket_metrics_configuration( - aws_retry=True, - Bucket=bucket_name, - Id=mc_id, - MetricsConfiguration=new_configuration + aws_retry=True, Bucket=bucket_name, Id=mc_id, MetricsConfiguration=new_configuration ) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to put bucket metrics configuration '%s'" % mc_id) @@ -172,12 +159,12 @@ def create_or_update_metrics_configuration(client, module): def delete_metrics_configuration(client, module): - bucket_name = module.params.get('bucket_name') - mc_id = module.params.get('id') + bucket_name = module.params.get("bucket_name") + mc_id = module.params.get("id") try: client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - except is_boto3_error_code('NoSuchConfiguration'): + except is_boto3_error_code("NoSuchConfiguration"): module.exit_json(changed=False) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket metrics configuration") @@ -187,7 +174,7 @@ def delete_metrics_configuration(client, module): try: client.delete_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - except is_boto3_error_code('NoSuchConfiguration'): + except is_boto3_error_code("NoSuchConfiguration"): module.exit_json(changed=False) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete bucket metrics configuration '%s'" % mc_id) @@ -197,29 +184,26 @@ def delete_metrics_configuration(client, module): def main(): argument_spec = dict( - bucket_name=dict(type='str', required=True), - id=dict(type='str', required=True), - filter_prefix=dict(type='str', required=False), - filter_tags=dict(default={}, type='dict', required=False, aliases=['filter_tag']), - state=dict(default='present', type='str', choices=['present', 'absent']), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True + bucket_name=dict(type="str", required=True), + id=dict(type="str", required=True), + filter_prefix=dict(type="str", required=False), + filter_tags=dict(default={}, type="dict", required=False, aliases=["filter_tag"]), + state=dict(default="present", type="str", choices=["present", "absent"]), ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params.get('state') + state = module.params.get("state") try: - client = module.client('s3', retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3)) + client = module.client("s3", retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3)) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': + if state == "present": create_or_update_metrics_configuration(client, module) - elif state == 'absent': + elif state == "absent": delete_metrics_configuration(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/s3_sync.py b/plugins/modules/s3_sync.py index 30a2e675f33..efc07efb150 100644 --- a/plugins/modules/s3_sync.py +++ b/plugins/modules/s3_sync.py @@ -261,25 +261,27 @@ def gather_files(fileroot, include=None, exclude=None): if os.path.isfile(fileroot): fullpath = fileroot fstat = os.stat(fullpath) - path_array = fileroot.split('/') + path_array = fileroot.split("/") chopped_path = path_array[-1] f_size = fstat[osstat.ST_SIZE] f_modified_epoch = fstat[osstat.ST_MTIME] - ret.append({ - 'fullpath': fullpath, - 'chopped_path': chopped_path, - 'modified_epoch': f_modified_epoch, - 'bytes': f_size, - }) + ret.append( + { + "fullpath": fullpath, + "chopped_path": chopped_path, + "modified_epoch": f_modified_epoch, + "bytes": f_size, + } + ) else: - for (dirpath, dirnames, filenames) in os.walk(fileroot): + for dirpath, dirnames, filenames in os.walk(fileroot): for fn in filenames: fullpath = os.path.join(dirpath, fn) # include/exclude if include: found = False - for x in include.split(','): + for x in include.split(","): if fnmatch.fnmatch(fn, x): found = True if not found: @@ -288,7 +290,7 @@ def gather_files(fileroot, include=None, exclude=None): if exclude: found = False - for x in exclude.split(','): + for x in exclude.split(","): if fnmatch.fnmatch(fn, x): found = True if found: @@ -299,36 +301,38 @@ def gather_files(fileroot, include=None, exclude=None): fstat = os.stat(fullpath) f_size = fstat[osstat.ST_SIZE] f_modified_epoch = fstat[osstat.ST_MTIME] - ret.append({ - 'fullpath': fullpath, - 'chopped_path': chopped_path, - 'modified_epoch': f_modified_epoch, - 'bytes': f_size, - }) + ret.append( + { + "fullpath": fullpath, + "chopped_path": chopped_path, + "modified_epoch": f_modified_epoch, + "bytes": f_size, + } + ) # dirpath = path *to* the directory # dirnames = subdirs *in* our directory # filenames return ret -def calculate_s3_path(filelist, key_prefix=''): +def calculate_s3_path(filelist, key_prefix=""): ret = [] for fileentry in filelist: # don't modify the input dict retentry = fileentry.copy() - retentry['s3_path'] = os.path.join(key_prefix, fileentry['chopped_path']) + retentry["s3_path"] = os.path.join(key_prefix, fileentry["chopped_path"]) ret.append(retentry) return ret -def calculate_local_etag(filelist, key_prefix=''): - '''Really, "calculate md5", but since AWS uses their own format, we'll just call - it a "local etag". TODO optimization: only calculate if remote key exists.''' +def calculate_local_etag(filelist, key_prefix=""): + """Really, "calculate md5", but since AWS uses their own format, we'll just call + it a "local etag". TODO optimization: only calculate if remote key exists.""" ret = [] for fileentry in filelist: # don't modify the input dict retentry = fileentry.copy() - retentry['local_etag'] = calculate_multipart_etag(fileentry['fullpath']) + retentry["local_etag"] = calculate_multipart_etag(fileentry["fullpath"]) ret.append(retentry) return ret @@ -337,20 +341,20 @@ def determine_mimetypes(filelist, override_map): ret = [] for fileentry in filelist: retentry = fileentry.copy() - localfile = fileentry['fullpath'] + localfile = fileentry["fullpath"] # reminder: file extension is '.txt', not 'txt'. file_extension = os.path.splitext(localfile)[1] if override_map and override_map.get(file_extension): # override? use it. - retentry['mime_type'] = override_map[file_extension] + retentry["mime_type"] = override_map[file_extension] else: # else sniff it - retentry['mime_type'], retentry['encoding'] = mimetypes.guess_type(localfile, strict=False) + retentry["mime_type"], retentry["encoding"] = mimetypes.guess_type(localfile, strict=False) # might be None or '' from one of the above. Not a great type but better than nothing. - if not retentry['mime_type']: - retentry['mime_type'] = 'application/octet-stream' + if not retentry["mime_type"]: + retentry["mime_type"] = "application/octet-stream" ret.append(retentry) @@ -362,10 +366,10 @@ def head_s3(s3, bucket, s3keys): for entry in s3keys: retentry = entry.copy() try: - retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path']) + retentry["s3_head"] = s3.head_object(Bucket=bucket, Key=entry["s3_path"]) # 404 (Missing) - File doesn't exist, we'll need to upload # 403 (Denied) - Sometimes we can write but not read, assume we'll need to upload - except is_boto3_error_code(['404', '403']): + except is_boto3_error_code(["404", "403"]): pass retkeys.append(retentry) return retkeys @@ -375,106 +379,127 @@ def filter_list(s3, bucket, s3filelist, strategy): keeplist = list(s3filelist) for e in keeplist: - e['_strategy'] = strategy + e["_strategy"] = strategy # init/fetch info from S3 if we're going to use it for comparisons - if not strategy == 'force': + if not strategy == "force": keeplist = head_s3(s3, bucket, s3filelist) # now actually run the strategies - if strategy == 'checksum': + if strategy == "checksum": for entry in keeplist: - if entry.get('s3_head'): + if entry.get("s3_head"): # since we have a remote s3 object, compare the values. - if entry['s3_head']['ETag'] == entry['local_etag']: + if entry["s3_head"]["ETag"] == entry["local_etag"]: # files match, so remove the entry - entry['skip_flag'] = True + entry["skip_flag"] = True else: # file etags don't match, keep the entry. pass else: # we don't have an etag, so we'll keep it. pass - elif strategy == 'date_size': + elif strategy == "date_size": for entry in keeplist: - if entry.get('s3_head'): + if entry.get("s3_head"): # fstat = entry['stat'] - local_modified_epoch = entry['modified_epoch'] - local_size = entry['bytes'] + local_modified_epoch = entry["modified_epoch"] + local_size = entry["bytes"] # py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward. # remote_modified_epoch = entry['s3_head']['LastModified'].timestamp() - remote_modified_datetime = entry['s3_head']['LastModified'] - delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc())) + remote_modified_datetime = entry["s3_head"]["LastModified"] + delta = remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()) remote_modified_epoch = delta.seconds + (delta.days * 86400) - remote_size = entry['s3_head']['ContentLength'] + remote_size = entry["s3_head"]["ContentLength"] - entry['whytime'] = '{0} / {1}'.format(local_modified_epoch, remote_modified_epoch) - entry['whysize'] = '{0} / {1}'.format(local_size, remote_size) + entry["whytime"] = "{0} / {1}".format(local_modified_epoch, remote_modified_epoch) + entry["whysize"] = "{0} / {1}".format(local_size, remote_size) if local_modified_epoch <= remote_modified_epoch and local_size == remote_size: - entry['skip_flag'] = True + entry["skip_flag"] = True else: - entry['why'] = "no s3_head" + entry["why"] = "no s3_head" # else: probably 'force'. Basically we don't skip with any with other strategies. else: pass # prune 'please skip' entries, if any. - return [x for x in keeplist if not x.get('skip_flag')] + return [x for x in keeplist if not x.get("skip_flag")] def upload_files(s3, bucket, filelist, params): ret = [] for entry in filelist: - args = { - 'ContentType': entry['mime_type'] - } - if params.get('permission'): - args['ACL'] = params['permission'] - if params.get('cache_control'): - args['CacheControl'] = params['cache_control'] - if params.get('storage_class'): - args['StorageClass'] = params['storage_class'] + args = {"ContentType": entry["mime_type"]} + if params.get("permission"): + args["ACL"] = params["permission"] + if params.get("cache_control"): + args["CacheControl"] = params["cache_control"] + if params.get("storage_class"): + args["StorageClass"] = params["storage_class"] # if this fails exception is caught in main() - s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None) + s3.upload_file(entry["fullpath"], bucket, entry["s3_path"], ExtraArgs=args, Callback=None, Config=None) ret.append(entry) return ret def remove_files(s3, sourcelist, params): - bucket = params.get('bucket') - key_prefix = params.get('key_prefix') - paginator = s3.get_paginator('list_objects_v2') - current_keys = set(x['Key'] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get('Contents', [])) - keep_keys = set(to_text(source_file['s3_path']) for source_file in sourcelist) + bucket = params.get("bucket") + key_prefix = params.get("key_prefix") + paginator = s3.get_paginator("list_objects_v2") + current_keys = set( + x["Key"] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get("Contents", []) + ) + keep_keys = set(to_text(source_file["s3_path"]) for source_file in sourcelist) delete_keys = list(current_keys - keep_keys) # can delete 1000 objects at a time - groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)] + groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)] # fmt:skip for keys in groups_of_keys: - s3.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': key} for key in keys]}) + s3.delete_objects(Bucket=bucket, Delete={"Objects": [{"Key": key} for key in keys]}) return delete_keys def main(): argument_spec = dict( - mode=dict(choices=['push'], default='push'), - file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'), + mode=dict(choices=["push"], default="push"), + file_change_strategy=dict(choices=["force", "date_size", "checksum"], default="date_size"), bucket=dict(required=True), - key_prefix=dict(required=False, default='', no_log=False), - file_root=dict(required=True, type='path'), - permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', - 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']), - mime_map=dict(required=False, type='dict'), + key_prefix=dict(required=False, default="", no_log=False), + file_root=dict(required=True, type="path"), + permission=dict( + required=False, + choices=[ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control", + ], + ), + mime_map=dict(required=False, type="dict"), exclude=dict(required=False, default=".*"), include=dict(required=False, default="*"), - cache_control=dict(required=False, default=''), - delete=dict(required=False, type='bool', default=False), - storage_class=dict(required=False, default='STANDARD', - choices=['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA', 'ONEZONE_IA', - 'INTELLIGENT_TIERING', 'GLACIER', 'DEEP_ARCHIVE', 'OUTPOSTS']), + cache_control=dict(required=False, default=""), + delete=dict(required=False, type="bool", default=False), + storage_class=dict( + required=False, + default="STANDARD", + choices=[ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER", + "DEEP_ARCHIVE", + "OUTPOSTS", + ], + ), # future options: encoding, metadata, retries ) @@ -483,36 +508,43 @@ def main(): ) if not HAS_DATEUTIL: - module.fail_json(msg='dateutil required for this module') + module.fail_json(msg="dateutil required for this module") result = {} - mode = module.params['mode'] + mode = module.params["mode"] try: - s3 = module.client('s3') + s3 = module.client("s3") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if mode == 'push': + if mode == "push": try: - result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include']) - result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map')) - result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix']) + result["filelist_initial"] = gather_files( + module.params["file_root"], exclude=module.params["exclude"], include=module.params["include"] + ) + result["filelist_typed"] = determine_mimetypes(result["filelist_initial"], module.params.get("mime_map")) + result["filelist_s3"] = calculate_s3_path(result["filelist_typed"], module.params["key_prefix"]) try: - result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3']) + result["filelist_local_etag"] = calculate_local_etag(result["filelist_s3"]) except ValueError as e: - if module.params['file_change_strategy'] == 'checksum': - module.fail_json_aws(e, 'Unable to calculate checksum. If running in FIPS mode, you may need to use another file_change_strategy') - result['filelist_local_etag'] = result['filelist_s3'].copy() - result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy']) - result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params) - - if module.params['delete']: - result['removed'] = remove_files(s3, result['filelist_local_etag'], module.params) + if module.params["file_change_strategy"] == "checksum": + module.fail_json_aws( + e, + "Unable to calculate checksum. If running in FIPS mode, you may need to use another file_change_strategy", + ) + result["filelist_local_etag"] = result["filelist_s3"].copy() + result["filelist_actionable"] = filter_list( + s3, module.params["bucket"], result["filelist_local_etag"], module.params["file_change_strategy"] + ) + result["uploads"] = upload_files(s3, module.params["bucket"], result["filelist_actionable"], module.params) + + if module.params["delete"]: + result["removed"] = remove_files(s3, result["filelist_local_etag"], module.params) # mark changed if we actually upload something. - if result.get('uploads') or result.get('removed'): - result['changed'] = True + if result.get("uploads") or result.get("removed"): + result["changed"] = True # result.update(filelist=actionable_filelist) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to push file") @@ -520,5 +552,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/s3_website.py b/plugins/modules/s3_website.py index b73da51a68c..38c411b1fe2 100644 --- a/plugins/modules/s3_website.py +++ b/plugins/modules/s3_website.py @@ -172,40 +172,37 @@ def _create_redirect_dict(url): - redirect_dict = {} - url_split = url.split(':') + url_split = url.split(":") # Did we split anything? if len(url_split) == 2: - redirect_dict[u'Protocol'] = url_split[0] - redirect_dict[u'HostName'] = url_split[1].replace('//', '') + redirect_dict["Protocol"] = url_split[0] + redirect_dict["HostName"] = url_split[1].replace("//", "") elif len(url_split) == 1: - redirect_dict[u'HostName'] = url_split[0] + redirect_dict["HostName"] = url_split[0] else: - raise ValueError('Redirect URL appears invalid') + raise ValueError("Redirect URL appears invalid") return redirect_dict def _create_website_configuration(suffix, error_key, redirect_all_requests): - website_configuration = {} if error_key is not None: - website_configuration['ErrorDocument'] = {'Key': error_key} + website_configuration["ErrorDocument"] = {"Key": error_key} if suffix is not None: - website_configuration['IndexDocument'] = {'Suffix': suffix} + website_configuration["IndexDocument"] = {"Suffix": suffix} if redirect_all_requests is not None: - website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests) + website_configuration["RedirectAllRequestsTo"] = _create_redirect_dict(redirect_all_requests) return website_configuration def enable_or_update_bucket_as_website(client_connection, resource_connection, module): - bucket_name = module.params.get("name") redirect_all_requests = module.params.get("redirect_all_requests") # If redirect_all_requests is set then don't use the default suffix that has been set @@ -223,14 +220,19 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m try: website_config = client_connection.get_bucket_website(Bucket=bucket_name) - except is_boto3_error_code('NoSuchWebsiteConfiguration'): + except is_boto3_error_code("NoSuchWebsiteConfiguration"): website_config = None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get website configuration") if website_config is None: try: - bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + bucket_website.put( + WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to set bucket website configuration") @@ -238,18 +240,26 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m module.fail_json(msg=str(e)) else: try: - if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \ - (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \ - (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)): - + if ( + (suffix is not None and website_config["IndexDocument"]["Suffix"] != suffix) + or (error_key is not None and website_config["ErrorDocument"]["Key"] != error_key) + or ( + redirect_all_requests is not None + and website_config["RedirectAllRequestsTo"] != _create_redirect_dict(redirect_all_requests) + ) + ): try: - bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + bucket_website.put( + WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update bucket website configuration") except KeyError as e: try: - bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + bucket_website.put( + WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update bucket website configuration") @@ -264,15 +274,17 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m def disable_bucket_as_website(client_connection, module): - changed = False bucket_name = module.params.get("name") try: client_connection.get_bucket_website(Bucket=bucket_name) - except is_boto3_error_code('NoSuchWebsiteConfiguration'): + except is_boto3_error_code("NoSuchWebsiteConfiguration"): module.exit_json(changed=changed) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket website") try: @@ -285,36 +297,35 @@ def disable_bucket_as_website(client_connection, module): def main(): - argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', required=True, choices=['present', 'absent']), - suffix=dict(type='str', required=False, default='index.html'), - error_key=dict(type='str', required=False, no_log=False), - redirect_all_requests=dict(type='str', required=False), + name=dict(type="str", required=True), + state=dict(type="str", required=True, choices=["present", "absent"]), + suffix=dict(type="str", required=False, default="index.html"), + error_key=dict(type="str", required=False, no_log=False), + redirect_all_requests=dict(type="str", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['redirect_all_requests', 'suffix'], - ['redirect_all_requests', 'error_key'] + ["redirect_all_requests", "suffix"], + ["redirect_all_requests", "error_key"], ], ) try: - client_connection = module.client('s3') - resource_connection = module.resource('s3') + client_connection = module.client("s3") + resource_connection = module.resource("s3") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") state = module.params.get("state") - if state == 'present': + if state == "present": enable_or_update_bucket_as_website(client_connection, resource_connection, module) - elif state == 'absent': + elif state == "absent": disable_bucket_as_website(client_connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/secretsmanager_secret.py b/plugins/modules/secretsmanager_secret.py index 4aea26ebfc2..f611d600967 100644 --- a/plugins/modules/secretsmanager_secret.py +++ b/plugins/modules/secretsmanager_secret.py @@ -235,9 +235,19 @@ class Secret(object): """An object representation of the Secret described by the self.module args""" + def __init__( - self, name, secret_type, secret, resource_policy=None, description="", kms_key_id=None, - tags=None, lambda_arn=None, rotation_interval=None, replica_regions=None, + self, + name, + secret_type, + secret, + resource_policy=None, + description="", + kms_key_id=None, + tags=None, + lambda_arn=None, + rotation_interval=None, + replica_regions=None, ): self.name = name self.description = description @@ -258,9 +268,7 @@ def __init__( @property def create_args(self): - args = { - "Name": self.name - } + args = {"Name": self.name} if self.description: args["Description"] = self.description if self.kms_key_id: @@ -269,10 +277,9 @@ def create_args(self): add_replica_regions = [] for replica in self.replica_regions: if replica["kms_key_id"]: - add_replica_regions.append({'Region': replica["region"], - 'KmsKeyId': replica["kms_key_id"]}) + add_replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]}) else: - add_replica_regions.append({'Region': replica["region"]}) + add_replica_regions.append({"Region": replica["region"]}) args["AddReplicaRegions"] = add_replica_regions if self.tags: args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags) @@ -281,9 +288,7 @@ def create_args(self): @property def update_args(self): - args = { - "SecretId": self.name - } + args = {"SecretId": self.name} if self.description: args["Description"] = self.description if self.kms_key_id: @@ -293,9 +298,7 @@ def update_args(self): @property def secret_resource_policy_args(self): - args = { - "SecretId": self.name - } + args = {"SecretId": self.name} if self.resource_policy: args["ResourcePolicy"] = self.resource_policy return args @@ -315,7 +318,7 @@ class SecretsManagerInterface(object): def __init__(self, module): self.module = module - self.client = self.module.client('secretsmanager') + self.client = self.module.client("secretsmanager") def get_secret(self, name): try: @@ -376,9 +379,7 @@ def remove_replication(self, name, regions): self.module.exit_json(changed=True) try: replica_regions = [] - response = self.client.remove_regions_from_replication( - SecretId=name, - RemoveReplicaRegions=regions) + response = self.client.remove_regions_from_replication(SecretId=name, RemoveReplicaRegions=regions) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to replicate secret") return response @@ -390,12 +391,10 @@ def replicate_secret(self, name, regions): replica_regions = [] for replica in regions: if replica["kms_key_id"]: - replica_regions.append({'Region': replica["region"], 'KmsKeyId': replica["kms_key_id"]}) + replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]}) else: - replica_regions.append({'Region': replica["region"]}) - response = self.client.replicate_secret_to_regions( - SecretId=name, - AddReplicaRegions=replica_regions) + replica_regions.append({"Region": replica["region"]}) + response = self.client.replicate_secret_to_regions(SecretId=name, AddReplicaRegions=replica_regions) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to replicate secret") return response @@ -436,7 +435,8 @@ def update_rotation(self, secret): response = self.client.rotate_secret( SecretId=secret.name, RotationLambdaARN=secret.rotation_lambda_arn, - RotationRules=secret.rotation_rules) + RotationRules=secret.rotation_rules, + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to rotate secret secret") else: @@ -476,7 +476,7 @@ def secrets_match(self, desired_secret, current_secret): if desired_secret.kms_key_id != current_secret.get("KmsKeyId"): return False current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name")) - if desired_secret.secret_type == 'SecretBinary': + if desired_secret.secret_type == "SecretBinary": desired_value = to_bytes(desired_secret.secret) else: desired_value = desired_secret.secret @@ -537,65 +537,69 @@ def compare_regions(desired_secret, current_secret): def main(): replica_args = dict( - region=dict(type='str', required=True), - kms_key_id=dict(type='str', required=False), + region=dict(type="str", required=True), + kms_key_id=dict(type="str", required=False), ) module = AnsibleAWSModule( argument_spec={ - 'name': dict(required=True), - 'state': dict(choices=['present', 'absent'], default='present'), - 'overwrite': dict(type='bool', default=True), - 'description': dict(default=""), - 'replica': dict(type='list', elements='dict', options=replica_args), - 'kms_key_id': dict(), - 'secret_type': dict(choices=['binary', 'string'], default="string"), - 'secret': dict(default="", no_log=True), - 'json_secret': dict(type='json', no_log=True), - 'resource_policy': dict(type='json', default=None), - 'tags': dict(type='dict', default=None, aliases=['resource_tags']), - 'purge_tags': dict(type='bool', default=True), - 'rotation_lambda': dict(), - 'rotation_interval': dict(type='int', default=30), - 'recovery_window': dict(type='int', default=30), + "name": dict(required=True), + "state": dict(choices=["present", "absent"], default="present"), + "overwrite": dict(type="bool", default=True), + "description": dict(default=""), + "replica": dict(type="list", elements="dict", options=replica_args), + "kms_key_id": dict(), + "secret_type": dict(choices=["binary", "string"], default="string"), + "secret": dict(default="", no_log=True), + "json_secret": dict(type="json", no_log=True), + "resource_policy": dict(type="json", default=None), + "tags": dict(type="dict", default=None, aliases=["resource_tags"]), + "purge_tags": dict(type="bool", default=True), + "rotation_lambda": dict(), + "rotation_interval": dict(type="int", default=30), + "recovery_window": dict(type="int", default=30), }, - mutually_exclusive=[['secret', 'json_secret']], + mutually_exclusive=[["secret", "json_secret"]], supports_check_mode=True, ) changed = False - state = module.params.get('state') + state = module.params.get("state") secrets_mgr = SecretsManagerInterface(module) - recovery_window = module.params.get('recovery_window') + recovery_window = module.params.get("recovery_window") secret = Secret( - module.params.get('name'), - module.params.get('secret_type'), - module.params.get('secret') or module.params.get('json_secret'), - description=module.params.get('description'), - replica_regions=module.params.get('replica'), - kms_key_id=module.params.get('kms_key_id'), - resource_policy=module.params.get('resource_policy'), - tags=module.params.get('tags'), - lambda_arn=module.params.get('rotation_lambda'), - rotation_interval=module.params.get('rotation_interval') + module.params.get("name"), + module.params.get("secret_type"), + module.params.get("secret") or module.params.get("json_secret"), + description=module.params.get("description"), + replica_regions=module.params.get("replica"), + kms_key_id=module.params.get("kms_key_id"), + resource_policy=module.params.get("resource_policy"), + tags=module.params.get("tags"), + lambda_arn=module.params.get("rotation_lambda"), + rotation_interval=module.params.get("rotation_interval"), ) - purge_tags = module.params.get('purge_tags') + purge_tags = module.params.get("purge_tags") current_secret = secrets_mgr.get_secret(secret.name) - if state == 'absent': + if state == "absent": if current_secret: if not current_secret.get("DeletedDate"): - result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)) + result = camel_dict_to_snake_dict( + secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window) + ) changed = True elif current_secret.get("DeletedDate") and recovery_window == 0: - result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)) + result = camel_dict_to_snake_dict( + secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window) + ) changed = True else: result = "secret already scheduled for deletion" else: result = "secret does not exist" - if state == 'present': + if state == "present": if current_secret is None: result = secrets_mgr.create_secret(secret) if secret.resource_policy and result.get("ARN"): @@ -607,7 +611,7 @@ def main(): secrets_mgr.restore_secret(secret.name) changed = True if not secrets_mgr.secrets_match(secret, current_secret): - overwrite = module.params.get('overwrite') + overwrite = module.params.get("overwrite") if overwrite: result = secrets_mgr.update_secret(secret) changed = True @@ -624,8 +628,8 @@ def main(): result = secrets_mgr.put_resource_policy(secret) changed = True - if module.params.get('tags') is not None: - current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', [])) + if module.params.get("tags") is not None: + current_tags = boto3_tag_list_to_ansible_dict(current_secret.get("Tags", [])) tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags, purge_tags) if tags_to_add: secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add)) @@ -643,12 +647,12 @@ def main(): changed = True result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name)) - if result.get('tags', None) is not None: - result['tags_dict'] = boto3_tag_list_to_ansible_dict(result.get('tags', [])) + if result.get("tags", None) is not None: + result["tags_dict"] = boto3_tag_list_to_ansible_dict(result.get("tags", [])) result.pop("response_metadata") module.exit_json(changed=changed, secret=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ses_identity.py b/plugins/modules/ses_identity.py index df80b736b91..7a966da4a48 100644 --- a/plugins/modules/ses_identity.py +++ b/plugins/modules/ses_identity.py @@ -242,8 +242,10 @@ def get_verification_attributes(connection, module, identity, retries=0, retryDe try: response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity)) - identity_verification = response['VerificationAttributes'] + module.fail_json_aws( + e, msg="Failed to retrieve identity verification attributes for {identity}".format(identity=identity) + ) + identity_verification = response["VerificationAttributes"] if identity in identity_verification: break time.sleep(retryDelay) @@ -263,8 +265,10 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel try: response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity)) - notification_attributes = response['NotificationAttributes'] + module.fail_json_aws( + e, msg="Failed to retrieve identity notification attributes for {identity}".format(identity=identity) + ) + notification_attributes = response["NotificationAttributes"] # No clear AWS docs on when this happens, but it appears sometimes identities are not included in # in the notification attributes when the identity is first registered. Suspect that this is caused by @@ -280,7 +284,7 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel # something has gone very wrong. if len(notification_attributes) != 0: module.fail_json( - msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format( + msg="Unexpected identity found in notification attributes, expected {0} but got {1!r}.".format( identity, notification_attributes.keys(), ) @@ -292,9 +296,9 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel def desired_topic(module, notification_type): - arg_dict = module.params.get(notification_type.lower() + '_notifications') + arg_dict = module.params.get(notification_type.lower() + "_notifications") if arg_dict: - return arg_dict.get('topic', None) + return arg_dict.get("topic", None) else: return None @@ -304,7 +308,7 @@ def update_notification_topic(connection, module, identity, identity_notificatio if module.params.get(f"{notification_type.lower()}_notifications") is None: return False - topic_key = notification_type + 'Topic' + topic_key = notification_type + "Topic" if identity_notifications is None: # If there is no configuration for notifications cannot be being sent to topics # hence assume None as the current state. @@ -335,17 +339,20 @@ def update_notification_topic(connection, module, identity, identity_notificatio connection.set_identity_notification_topic(**request_kwargs) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format( - identity=identity, - notification_type=notification_type, - )) + module.fail_json_aws( + e, + msg="Failed to set identity notification topic for {identity} {notification_type}".format( + identity=identity, + notification_type=notification_type, + ), + ) return True return False def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type): - arg_dict = module.params.get(notification_type.lower() + '_notifications') - header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled' + arg_dict = module.params.get(notification_type.lower() + "_notifications") + header_key = "HeadersIn" + notification_type + "NotificationsEnabled" if identity_notifications is None: # If there is no configuration for topic notifications, headers cannot be being # forwarded, hence assume false. @@ -358,21 +365,25 @@ def update_notification_topic_headers(connection, module, identity, identity_not # headers are not included since most API consumers would interpret absence as false. current = False - if arg_dict is not None and 'include_headers' in arg_dict: - required = arg_dict['include_headers'] + if arg_dict is not None and "include_headers" in arg_dict: + required = arg_dict["include_headers"] else: required = False if current != required: try: if not module.check_mode: - connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required, - aws_retry=True) + connection.set_identity_headers_in_notifications_enabled( + Identity=identity, NotificationType=notification_type, Enabled=required, aws_retry=True + ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format( - identity=identity, - notification_type=notification_type, - )) + module.fail_json_aws( + e, + msg="Failed to set identity headers in notification for {identity} {notification_type}".format( + identity=identity, + notification_type=notification_type, + ), + ) return True return False @@ -383,51 +394,57 @@ def update_feedback_forwarding(connection, module, identity, identity_notificati # are being handled by SNS topics. So in the absence of identity_notifications # information existing feedback forwarding must be on. current = True - elif 'ForwardingEnabled' in identity_notifications: - current = identity_notifications['ForwardingEnabled'] + elif "ForwardingEnabled" in identity_notifications: + current = identity_notifications["ForwardingEnabled"] else: # If there is information on the notifications setup but no information on the # forwarding state it's pretty safe to assume forwarding is off. AWS API docs # suggest this information will always be included but best to be defensive current = False - required = module.params.get('feedback_forwarding') + required = module.params.get("feedback_forwarding") if current != required: try: if not module.check_mode: - connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True) + connection.set_identity_feedback_forwarding_enabled( + Identity=identity, ForwardingEnabled=required, aws_retry=True + ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity)) + module.fail_json_aws( + e, msg="Failed to set identity feedback forwarding for {identity}".format(identity=identity) + ) return True return False def create_mock_notifications_response(module): resp = { - "ForwardingEnabled": module.params.get('feedback_forwarding'), + "ForwardingEnabled": module.params.get("feedback_forwarding"), } - for notification_type in ('Bounce', 'Complaint', 'Delivery'): - arg_dict = module.params.get(notification_type.lower() + '_notifications') - if arg_dict is not None and 'topic' in arg_dict: - resp[notification_type + 'Topic'] = arg_dict['topic'] - - header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled' - if arg_dict is not None and 'include_headers' in arg_dict: - resp[header_key] = arg_dict['include_headers'] + for notification_type in ("Bounce", "Complaint", "Delivery"): + arg_dict = module.params.get(notification_type.lower() + "_notifications") + if arg_dict is not None and "topic" in arg_dict: + resp[notification_type + "Topic"] = arg_dict["topic"] + + header_key = "HeadersIn" + notification_type + "NotificationsEnabled" + if arg_dict is not None and "include_headers" in arg_dict: + resp[header_key] = arg_dict["include_headers"] else: resp[header_key] = False return resp def update_identity_notifications(connection, module): - identity = module.params.get('identity') + identity = module.params.get("identity") changed = False identity_notifications = get_identity_notifications(connection, module, identity) - for notification_type in ('Bounce', 'Complaint', 'Delivery'): + for notification_type in ("Bounce", "Complaint", "Delivery"): changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type) - changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type) + changed |= update_notification_topic_headers( + connection, module, identity, identity_notifications, notification_type + ) changed |= update_feedback_forwarding(connection, module, identity, identity_notifications) @@ -440,25 +457,27 @@ def update_identity_notifications(connection, module): def validate_params_for_identity_present(module): - if module.params.get('feedback_forwarding') is False: - if not (desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint')): - module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires " - "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics") + if module.params.get("feedback_forwarding") is False: + if not (desired_topic(module, "Bounce") and desired_topic(module, "Complaint")): + module.fail_json( + msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires " + "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics" + ) def create_or_update_identity(connection, module, region, account_id): - identity = module.params.get('identity') + identity = module.params.get("identity") changed = False verification_attributes = get_verification_attributes(connection, module, identity) if verification_attributes is None: try: if not module.check_mode: - if '@' in identity: + if "@" in identity: connection.verify_email_identity(EmailAddress=identity, aws_retry=True) else: connection.verify_domain_identity(Domain=identity, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity)) + module.fail_json_aws(e, msg="Failed to verify identity {identity}".format(identity=identity)) if module.check_mode: verification_attributes = { "VerificationStatus": "Pending", @@ -466,20 +485,22 @@ def create_or_update_identity(connection, module, region, account_id): else: verification_attributes = get_verification_attributes(connection, module, identity, retries=4) changed = True - elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'): - module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'], - verification_attributes=camel_dict_to_snake_dict(verification_attributes)) + elif verification_attributes["VerificationStatus"] not in ("Pending", "Success"): + module.fail_json( + msg="Identity " + identity + " in bad status " + verification_attributes["VerificationStatus"], + verification_attributes=camel_dict_to_snake_dict(verification_attributes), + ) if verification_attributes is None: - module.fail_json(msg='Unable to load identity verification attributes after registering identity.') + module.fail_json(msg="Unable to load identity verification attributes after registering identity.") notifications_changed, notification_attributes = update_identity_notifications(connection, module) changed |= notifications_changed if notification_attributes is None: - module.fail_json(msg='Unable to load identity notification attributes.') + module.fail_json(msg="Unable to load identity notification attributes.") - identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity + identity_arn = "arn:aws:ses:" + region + ":" + account_id + ":identity/" + identity module.exit_json( changed=changed, @@ -491,7 +512,7 @@ def create_or_update_identity(connection, module, region, account_id): def destroy_identity(connection, module): - identity = module.params.get('identity') + identity = module.params.get("identity") changed = False verification_attributes = get_verification_attributes(connection, module, identity) if verification_attributes is not None: @@ -499,7 +520,7 @@ def destroy_identity(connection, module): if not module.check_mode: connection.delete_identity(Identity=identity, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity)) + module.fail_json_aws(e, msg="Failed to delete identity {identity}".format(identity=identity)) changed = True module.exit_json( @@ -509,44 +530,50 @@ def destroy_identity(connection, module): def get_account_id(module): - sts = module.client('sts') + sts = module.client("sts") try: caller_identity = sts.get_caller_identity() except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve caller identity') - return caller_identity['Account'] + module.fail_json_aws(e, msg="Failed to retrieve caller identity") + return caller_identity["Account"] def main(): module = AnsibleAWSModule( argument_spec={ - "identity": dict(required=True, type='str'), - "state": dict(default='present', choices=['present', 'absent']), - "bounce_notifications": dict(type='dict'), - "complaint_notifications": dict(type='dict'), - "delivery_notifications": dict(type='dict'), - "feedback_forwarding": dict(default=True, type='bool'), + "identity": dict(required=True, type="str"), + "state": dict(default="present", choices=["present", "absent"]), + "bounce_notifications": dict(type="dict"), + "complaint_notifications": dict(type="dict"), + "delivery_notifications": dict(type="dict"), + "feedback_forwarding": dict(default=True, type="bool"), }, supports_check_mode=True, ) - for notification_type in ('bounce', 'complaint', 'delivery'): - param_name = notification_type + '_notifications' + for notification_type in ("bounce", "complaint", "delivery"): + param_name = notification_type + "_notifications" arg_dict = module.params.get(param_name) if arg_dict: - extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')] + extra_keys = [x for x in arg_dict.keys() if x not in ("topic", "include_headers")] if extra_keys: - module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers') + module.fail_json( + msg="Unexpected keys " + + str(extra_keys) + + " in " + + param_name + + " valid keys are topic or include_headers" + ) # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but # the ansible build runs multiple instances of the test in parallel that's caused throttling # failures so apply a jittered backoff to call SES calls. - connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": region = module.region account_id = get_account_id(module) validate_params_for_identity_present(module) @@ -555,5 +582,5 @@ def main(): destroy_identity(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ses_identity_policy.py b/plugins/modules/ses_identity_policy.py index ed558307df5..a28d027549a 100644 --- a/plugins/modules/ses_identity_policy.py +++ b/plugins/modules/ses_identity_policy.py @@ -101,17 +101,17 @@ def get_identity_policy(connection, module, identity, policy_name): try: response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name)) - policies = response['Policies'] + module.fail_json_aws(e, msg="Failed to retrieve identity policy {policy}".format(policy=policy_name)) + policies = response["Policies"] if policy_name in policies: return policies[policy_name] return None def create_or_update_identity_policy(connection, module): - identity = module.params.get('identity') - policy_name = module.params.get('policy_name') - required_policy = module.params.get('policy') + identity = module.params.get("identity") + policy_name = module.params.get("policy_name") + required_policy = module.params.get("policy") required_policy_dict = json.loads(required_policy) changed = False @@ -121,9 +121,11 @@ def create_or_update_identity_policy(connection, module): changed = True try: if not module.check_mode: - connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True) + connection.put_identity_policy( + Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True + ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name)) + module.fail_json_aws(e, msg="Failed to put identity policy {policy}".format(policy=policy_name)) # Load the list of applied policies to include in the response. # In principle we should be able to just return the response, but given @@ -134,9 +136,9 @@ def create_or_update_identity_policy(connection, module): # # As a nice side benefit this also means the return is correct in check mode try: - policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames'] + policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"] except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to list identity policies') + module.fail_json_aws(e, msg="Failed to list identity policies") if policy_name is not None and policy_name not in policies_present: policies_present = list(policies_present) policies_present.append(policy_name) @@ -147,20 +149,20 @@ def create_or_update_identity_policy(connection, module): def delete_identity_policy(connection, module): - identity = module.params.get('identity') - policy_name = module.params.get('policy_name') + identity = module.params.get("identity") + policy_name = module.params.get("policy_name") changed = False try: - policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames'] + policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"] except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to list identity policies') + module.fail_json_aws(e, msg="Failed to list identity policies") if policy_name in policies_present: try: if not module.check_mode: connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name)) + module.fail_json_aws(e, msg="Failed to delete identity policy {policy}".format(policy=policy_name)) changed = True policies_present = list(policies_present) policies_present.remove(policy_name) @@ -174,12 +176,12 @@ def delete_identity_policy(connection, module): def main(): module = AnsibleAWSModule( argument_spec={ - 'identity': dict(required=True, type='str'), - 'state': dict(default='present', choices=['present', 'absent']), - 'policy_name': dict(required=True, type='str'), - 'policy': dict(type='json', default=None), + "identity": dict(required=True, type="str"), + "state": dict(default="present", choices=["present", "absent"]), + "policy_name": dict(required=True, type="str"), + "policy": dict(type="json", default=None), }, - required_if=[['state', 'present', ['policy']]], + required_if=[["state", "present", ["policy"]]], supports_check_mode=True, ) @@ -187,15 +189,15 @@ def main(): # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but # the ansible build runs multiple instances of the test in parallel that's caused throttling # failures so apply a jittered backoff to call SES calls. - connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_identity_policy(connection, module) else: delete_identity_policy(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ses_rule_set.py b/plugins/modules/ses_rule_set.py index 72730b1b28f..9915622ed7d 100644 --- a/plugins/modules/ses_rule_set.py +++ b/plugins/modules/ses_rule_set.py @@ -116,11 +116,11 @@ def list_rule_sets(client, module): response = client.list_receipt_rule_sets(aws_retry=True) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Couldn't list rule sets.") - return response['RuleSets'] + return response["RuleSets"] def rule_set_in(name, rule_sets): - return any(s for s in rule_sets if s['Name'] == name) + return any(s for s in rule_sets if s["Name"] == name) def ruleset_active(client, module, name): @@ -128,8 +128,8 @@ def ruleset_active(client, module, name): active_rule_set = client.describe_active_receipt_rule_set(aws_retry=True) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Couldn't get the active rule set.") - if active_rule_set is not None and 'Metadata' in active_rule_set: - return name == active_rule_set['Metadata']['Name'] + if active_rule_set is not None and "Metadata" in active_rule_set: + return name == active_rule_set["Metadata"]["Name"] else: # Metadata was not set meaning there is no active rule set return False @@ -167,7 +167,7 @@ def update_active_rule_set(client, module, name, desired_active): def create_or_update_rule_set(client, module): - name = module.params.get('name') + name = module.params.get("name") check_mode = module.check_mode changed = False @@ -180,11 +180,13 @@ def create_or_update_rule_set(client, module): module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name)) changed = True rule_sets = list(rule_sets) - rule_sets.append({ - 'Name': name, - }) + rule_sets.append( + { + "Name": name, + } + ) - (active_changed, active) = update_active_rule_set(client, module, name, module.params.get('active')) + (active_changed, active) = update_active_rule_set(client, module, name, module.params.get("active")) changed |= active_changed module.exit_json( @@ -195,30 +197,32 @@ def create_or_update_rule_set(client, module): def remove_rule_set(client, module): - name = module.params.get('name') + name = module.params.get("name") check_mode = module.check_mode changed = False rule_sets = list_rule_sets(client, module) if rule_set_in(name, rule_sets): active = ruleset_active(client, module, name) - if active and not module.params.get('force'): + if active and not module.params.get("force"): module.fail_json( - msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format(name), + msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format( + name + ), error={ "code": "CannotDelete", "message": "Cannot delete active rule set: {0}".format(name), - } + }, ) if not check_mode: - if active and module.params.get('force'): + if active and module.params.get("force"): deactivate_rule_set(client, module) try: client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name)) changed = True - rule_sets = [x for x in rule_sets if x['Name'] != name] + rule_sets = [x for x in rule_sets if x["Name"] != name] module.exit_json( changed=changed, @@ -228,27 +232,27 @@ def remove_rule_set(client, module): def main(): argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - active=dict(type='bool'), - force=dict(type='bool', default=False), + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + active=dict(type="bool"), + force=dict(type="bool", default=False), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params.get('state') + state = module.params.get("state") # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but # the ansible build runs multiple instances of the test in parallel that's caused throttling # failures so apply a jittered backoff to call SES calls. - client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) - if state == 'absent': + if state == "absent": remove_rule_set(client, module) else: create_or_update_rule_set(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sns.py b/plugins/modules/sns.py index 96f5b72e70e..53c63a05645 100644 --- a/plugins/modules/sns.py +++ b/plugins/modules/sns.py @@ -158,7 +158,7 @@ from botocore.exceptions import BotoCoreError from botocore.exceptions import ClientError except ImportError: - pass # Handled by AnsibleAWSModule + pass # Handled by AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup @@ -167,22 +167,22 @@ def main(): protocols = [ - 'http', - 'https', - 'email', - 'email_json', - 'sms', - 'sqs', - 'application', - 'lambda', + "http", + "https", + "email", + "email_json", + "sms", + "sqs", + "application", + "lambda", ] argument_spec = dict( - msg=dict(required=True, aliases=['default']), + msg=dict(required=True, aliases=["default"]), subject=dict(), topic=dict(required=True), - message_attributes=dict(type='dict'), - message_structure=dict(choices=['json', 'string'], default='json'), + message_attributes=dict(type="dict"), + message_structure=dict(choices=["json", "string"], default="json"), message_group_id=dict(), message_deduplication_id=dict(), ) @@ -193,50 +193,48 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec) sns_kwargs = dict( - Message=module.params['msg'], - Subject=module.params['subject'], - MessageStructure=module.params['message_structure'], + Message=module.params["msg"], + Subject=module.params["subject"], + MessageStructure=module.params["message_structure"], ) - if module.params['message_attributes']: - if module.params['message_structure'] != 'string': + if module.params["message_attributes"]: + if module.params["message_structure"] != "string": module.fail_json(msg='message_attributes is only supported when the message_structure is "string".') - sns_kwargs['MessageAttributes'] = module.params['message_attributes'] + sns_kwargs["MessageAttributes"] = module.params["message_attributes"] if module.params["message_group_id"]: sns_kwargs["MessageGroupId"] = module.params["message_group_id"] if module.params["message_deduplication_id"]: sns_kwargs["MessageDeduplicationId"] = module.params["message_deduplication_id"] - dict_msg = { - 'default': sns_kwargs['Message'] - } + dict_msg = {"default": sns_kwargs["Message"]} for p in protocols: if module.params[p]: - if sns_kwargs['MessageStructure'] != 'json': + if sns_kwargs["MessageStructure"] != "json": module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".') - dict_msg[p.replace('_', '-')] = module.params[p] + dict_msg[p.replace("_", "-")] = module.params[p] - client = module.client('sns') + client = module.client("sns") - topic = module.params['topic'] - if ':' in topic: + topic = module.params["topic"] + if ":" in topic: # Short names can't contain ':' so we'll assume this is the full ARN - sns_kwargs['TopicArn'] = topic + sns_kwargs["TopicArn"] = topic else: - sns_kwargs['TopicArn'] = topic_arn_lookup(client, module, topic) + sns_kwargs["TopicArn"] = topic_arn_lookup(client, module, topic) - if not sns_kwargs['TopicArn']: - module.fail_json(msg='Could not find topic: {0}'.format(topic)) + if not sns_kwargs["TopicArn"]: + module.fail_json(msg="Could not find topic: {0}".format(topic)) - if sns_kwargs['MessageStructure'] == 'json': - sns_kwargs['Message'] = json.dumps(dict_msg) + if sns_kwargs["MessageStructure"] == "json": + sns_kwargs["Message"] = json.dumps(dict_msg) try: result = client.publish(**sns_kwargs) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to publish message') + module.fail_json_aws(e, msg="Failed to publish message") sns_result = dict(msg="OK", message_id=result["MessageId"]) @@ -246,5 +244,5 @@ def main(): module.exit_json(**sns_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sns_topic.py b/plugins/modules/sns_topic.py index 03b3338350c..90929a476ea 100644 --- a/plugins/modules/sns_topic.py +++ b/plugins/modules/sns_topic.py @@ -353,24 +353,25 @@ class SnsTopicManager(object): - """ Handles SNS Topic creation and destruction """ - - def __init__(self, - module, - name, - topic_type, - state, - display_name, - policy, - delivery_policy, - subscriptions, - purge_subscriptions, - tags, - purge_tags, - content_based_deduplication, - check_mode): - - self.connection = module.client('sns') + """Handles SNS Topic creation and destruction""" + + def __init__( + self, + module, + name, + topic_type, + state, + display_name, + policy, + delivery_policy, + subscriptions, + purge_subscriptions, + tags, + purge_tags, + content_based_deduplication, + check_mode, + ): + self.connection = module.client("sns") self.module = module self.name = name self.topic_type = topic_type @@ -400,73 +401,80 @@ def _create_topic(self): # NOTE: Never set FifoTopic = False. Some regions (including GovCloud) # don't support the attribute being set, even to False. - if self.topic_type == 'fifo': - attributes['FifoTopic'] = 'true' - if not self.name.endswith('.fifo'): - self.name = self.name + '.fifo' + if self.topic_type == "fifo": + attributes["FifoTopic"] = "true" + if not self.name.endswith(".fifo"): + self.name = self.name + ".fifo" if self.tags: tags = ansible_dict_to_boto3_tag_list(self.tags) if not self.check_mode: try: - response = self.connection.create_topic(Name=self.name, - Attributes=attributes, - Tags=tags) + response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name) - self.topic_arn = response['TopicArn'] + self.topic_arn = response["TopicArn"] return True def _set_topic_attrs(self): changed = False try: - topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes'] + topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)["Attributes"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn) - if self.display_name and self.display_name != topic_attributes['DisplayName']: + if self.display_name and self.display_name != topic_attributes["DisplayName"]: changed = True - self.attributes_set.append('display_name') + self.attributes_set.append("display_name") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName', - AttributeValue=self.display_name) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, AttributeName="DisplayName", AttributeValue=self.display_name + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set display name") - if self.policy and compare_policies(self.policy, json.loads(topic_attributes['Policy'])): + if self.policy and compare_policies(self.policy, json.loads(topic_attributes["Policy"])): changed = True - self.attributes_set.append('policy') + self.attributes_set.append("policy") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy', - AttributeValue=json.dumps(self.policy)) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, AttributeName="Policy", AttributeValue=json.dumps(self.policy) + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set topic policy") # Set content-based deduplication attribute. Ignore if topic_type is not fifo. - if ("FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true") and \ - self.content_based_deduplication: - enabled = "true" if self.content_based_deduplication in 'enabled' else "false" - if enabled != topic_attributes['ContentBasedDeduplication']: + if ( + "FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true" + ) and self.content_based_deduplication: + enabled = "true" if self.content_based_deduplication in "enabled" else "false" + if enabled != topic_attributes["ContentBasedDeduplication"]: changed = True - self.attributes_set.append('content_based_deduplication') + self.attributes_set.append("content_based_deduplication") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='ContentBasedDeduplication', - AttributeValue=enabled) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, AttributeName="ContentBasedDeduplication", AttributeValue=enabled + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set content-based deduplication") - if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or - compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))): + if self.delivery_policy and ( + "DeliveryPolicy" not in topic_attributes + or compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes["DeliveryPolicy"])) + ): changed = True - self.attributes_set.append('delivery_policy') + self.attributes_set.append("delivery_policy") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy', - AttributeValue=json.dumps(self.delivery_policy)) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, + AttributeName="DeliveryPolicy", + AttributeValue=json.dumps(self.delivery_policy), + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy") return changed @@ -474,20 +482,23 @@ def _set_topic_attrs(self): def _set_topic_subs(self): changed = False subscriptions_existing_list = set() - desired_subscriptions = [(sub['protocol'], - canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in - self.subscriptions] + desired_subscriptions = [ + (sub["protocol"], canonicalize_endpoint(sub["protocol"], sub["endpoint"])) for sub in self.subscriptions + ] for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): - sub_key = (sub['Protocol'], sub['Endpoint']) + sub_key = (sub["Protocol"], sub["Endpoint"]) subscriptions_existing_list.add(sub_key) - if (self.purge_subscriptions and sub_key not in desired_subscriptions and - sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')): + if ( + self.purge_subscriptions + and sub_key not in desired_subscriptions + and sub["SubscriptionArn"] not in ("PendingConfirmation", "Deleted") + ): changed = True self.subscriptions_deleted.append(sub_key) if not self.check_mode: try: - self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn']) + self.connection.unsubscribe(SubscriptionArn=sub["SubscriptionArn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic") @@ -503,8 +514,8 @@ def _set_topic_subs(self): def _init_desired_subscription_attributes(self): for sub in self.subscriptions: - sub_key = (sub['protocol'], canonicalize_endpoint(sub['protocol'], sub['endpoint'])) - tmp_dict = sub.get('attributes', {}) + sub_key = (sub["protocol"], canonicalize_endpoint(sub["protocol"], sub["endpoint"])) + tmp_dict = sub.get("attributes", {}) # aws sdk expects values to be strings # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#SNS.Client.set_subscription_attributes for k, v in tmp_dict.items(): @@ -515,26 +526,28 @@ def _init_desired_subscription_attributes(self): def _set_topic_subs_attributes(self): changed = False for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): - sub_key = (sub['Protocol'], sub['Endpoint']) - sub_arn = sub['SubscriptionArn'] + sub_key = (sub["Protocol"], sub["Endpoint"]) + sub_arn = sub["SubscriptionArn"] if not self.desired_subscription_attributes.get(sub_key): # subscription attributes aren't defined in desired, skipping continue try: - sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)['Attributes'] + sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)[ + "Attributes" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, "Couldn't get subscription attributes for subscription %s" % sub_arn) - raw_message = self.desired_subscription_attributes[sub_key].get('RawMessageDelivery') - if raw_message is not None and 'RawMessageDelivery' in sub_current_attributes: - if sub_current_attributes['RawMessageDelivery'].lower() != raw_message.lower(): + raw_message = self.desired_subscription_attributes[sub_key].get("RawMessageDelivery") + if raw_message is not None and "RawMessageDelivery" in sub_current_attributes: + if sub_current_attributes["RawMessageDelivery"].lower() != raw_message.lower(): changed = True if not self.check_mode: try: - self.connection.set_subscription_attributes(SubscriptionArn=sub_arn, - AttributeName='RawMessageDelivery', - AttributeValue=raw_message) + self.connection.set_subscription_attributes( + SubscriptionArn=sub_arn, AttributeName="RawMessageDelivery", AttributeValue=raw_message + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, "Couldn't set RawMessageDelivery subscription attribute") @@ -547,11 +560,11 @@ def _delete_subscriptions(self): if not subscriptions: return False for sub in subscriptions: - if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'): - self.subscriptions_deleted.append(sub['SubscriptionArn']) + if sub["SubscriptionArn"] not in ("PendingConfirmation", "Deleted"): + self.subscriptions_deleted.append(sub["SubscriptionArn"]) if not self.check_mode: try: - self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn']) + self.connection.unsubscribe(SubscriptionArn=sub["SubscriptionArn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic") return True @@ -566,7 +579,7 @@ def _delete_topic(self): return True def _name_is_arn(self): - return self.name.startswith('arn:') + return self.name.startswith("arn:") def ensure_ok(self): changed = False @@ -576,7 +589,9 @@ def ensure_ok(self): if self.topic_arn in list_topics(self.connection, self.module): changed |= self._set_topic_attrs() elif self.display_name or self.policy or self.delivery_policy: - self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account") + self.module.fail_json( + msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account" + ) changed |= self._set_topic_subs() self._init_desired_subscription_attributes() if self.topic_arn in list_topics(self.connection, self.module): @@ -593,7 +608,9 @@ def ensure_gone(self): self.populate_topic_arn() if self.topic_arn: if self.topic_arn not in list_topics(self.connection, self.module): - self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe") + self.module.fail_json( + msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe" + ) changed = self._delete_subscriptions() changed |= self._delete_topic() return changed @@ -604,7 +621,7 @@ def populate_topic_arn(self): return name = self.name - if self.topic_type == 'fifo' and not name.endswith('.fifo'): + if self.topic_type == "fifo" and not name.endswith(".fifo"): name += ".fifo" self.topic_arn = topic_arn_lookup(self.connection, self.module, name) @@ -613,83 +630,87 @@ def main(): # We're kinda stuck with CamelCase here, it would be nice to switch to # snake_case, but we'd need to purge out the alias entries http_retry_args = dict( - minDelayTarget=dict(type='int', required=True), - maxDelayTarget=dict(type='int', required=True), - numRetries=dict(type='int', required=True), - numMaxDelayRetries=dict(type='int', required=True), - numMinDelayRetries=dict(type='int', required=True), - numNoDelayRetries=dict(type='int', required=True), - backoffFunction=dict(type='str', required=True, choices=['arithmetic', 'exponential', 'geometric', 'linear']), + minDelayTarget=dict(type="int", required=True), + maxDelayTarget=dict(type="int", required=True), + numRetries=dict(type="int", required=True), + numMaxDelayRetries=dict(type="int", required=True), + numMinDelayRetries=dict(type="int", required=True), + numNoDelayRetries=dict(type="int", required=True), + backoffFunction=dict(type="str", required=True, choices=["arithmetic", "exponential", "geometric", "linear"]), ) http_delivery_args = dict( - defaultHealthyRetryPolicy=dict(type='dict', required=True, options=http_retry_args), - disableSubscriptionOverrides=dict(type='bool', required=False), + defaultHealthyRetryPolicy=dict(type="dict", required=True, options=http_retry_args), + disableSubscriptionOverrides=dict(type="bool", required=False), defaultThrottlePolicy=dict( - type='dict', required=False, + type="dict", + required=False, options=dict( - maxReceivesPerSecond=dict(type='int', required=True), + maxReceivesPerSecond=dict(type="int", required=True), ), ), ) delivery_args = dict( - http=dict(type='dict', required=False, options=http_delivery_args), + http=dict(type="dict", required=False, options=http_delivery_args), ) argument_spec = dict( name=dict(required=True), - topic_type=dict(type='str', default='standard', choices=['standard', 'fifo']), - state=dict(default='present', choices=['present', 'absent']), + topic_type=dict(type="str", default="standard", choices=["standard", "fifo"]), + state=dict(default="present", choices=["present", "absent"]), display_name=dict(), - policy=dict(type='dict'), - delivery_policy=dict(type='dict', options=delivery_args), - subscriptions=dict(default=[], type='list', elements='dict'), - purge_subscriptions=dict(type='bool', default=True), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - content_based_deduplication=dict(choices=['enabled', 'disabled']) + policy=dict(type="dict"), + delivery_policy=dict(type="dict", options=delivery_args), + subscriptions=dict(default=[], type="list", elements="dict"), + purge_subscriptions=dict(type="bool", default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + content_based_deduplication=dict(choices=["enabled", "disabled"]), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - - name = module.params.get('name') - topic_type = module.params.get('topic_type') - state = module.params.get('state') - display_name = module.params.get('display_name') - policy = module.params.get('policy') - delivery_policy = module.params.get('delivery_policy') - subscriptions = module.params.get('subscriptions') - purge_subscriptions = module.params.get('purge_subscriptions') - content_based_deduplication = module.params.get('content_based_deduplication') + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + name = module.params.get("name") + topic_type = module.params.get("topic_type") + state = module.params.get("state") + display_name = module.params.get("display_name") + policy = module.params.get("policy") + delivery_policy = module.params.get("delivery_policy") + subscriptions = module.params.get("subscriptions") + purge_subscriptions = module.params.get("purge_subscriptions") + content_based_deduplication = module.params.get("content_based_deduplication") check_mode = module.check_mode - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - - sns_topic = SnsTopicManager(module, - name, - topic_type, - state, - display_name, - policy, - delivery_policy, - subscriptions, - purge_subscriptions, - tags, - purge_tags, - content_based_deduplication, - check_mode) - - if state == 'present': + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + + sns_topic = SnsTopicManager( + module, + name, + topic_type, + state, + display_name, + policy, + delivery_policy, + subscriptions, + purge_subscriptions, + tags, + purge_tags, + content_based_deduplication, + check_mode, + ) + + if state == "present": changed = sns_topic.ensure_ok() - elif state == 'absent': + elif state == "absent": changed = sns_topic.ensure_gone() - sns_facts = dict(changed=changed, - sns_arn=sns_topic.topic_arn, - sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn)) + sns_facts = dict( + changed=changed, + sns_arn=sns_topic.topic_arn, + sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn), + ) module.exit_json(**sns_facts) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sns_topic_info.py b/plugins/modules/sns_topic_info.py index 2fcde33e94c..51ec8372eac 100644 --- a/plugins/modules/sns_topic_info.py +++ b/plugins/modules/sns_topic_info.py @@ -146,18 +146,17 @@ def main(): argument_spec = dict( - topic_arn=dict(type='str', required=False), + topic_arn=dict(type="str", required=False), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - topic_arn = module.params.get('topic_arn') + topic_arn = module.params.get("topic_arn") try: - connection = module.client('sns', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("sns", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS.') + module.fail_json_aws(e, msg="Failed to connect to AWS.") if topic_arn: results = dict(sns_arn=topic_arn, sns_topic=get_info(connection, module, topic_arn)) @@ -167,5 +166,5 @@ def main(): module.exit_json(result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sqs_queue.py b/plugins/modules/sqs_queue.py index 4a23f18871b..ad3ce68a7ce 100644 --- a/plugins/modules/sqs_queue.py +++ b/plugins/modules/sqs_queue.py @@ -277,18 +277,18 @@ def get_queue_name(module, is_fifo=False): - name = module.params.get('name') - if not is_fifo or name.endswith('.fifo'): + name = module.params.get("name") + if not is_fifo or name.endswith(".fifo"): return name - return name + '.fifo' + return name + ".fifo" # NonExistentQueue is explicitly expected when a queue doesn't exist @AWSRetry.jittered_backoff() def get_queue_url(client, name): try: - return client.get_queue_url(QueueName=name)['QueueUrl'] - except is_boto3_error_code('AWS.SimpleQueueService.NonExistentQueue'): + return client.get_queue_url(QueueName=name)["QueueUrl"] + except is_boto3_error_code("AWS.SimpleQueueService.NonExistentQueue"): return None @@ -296,13 +296,13 @@ def describe_queue(client, queue_url): """ Description a queue in snake format """ - attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes'] + attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"], aws_retry=True)["Attributes"] description = dict(attributes) - description.pop('Policy', None) - description.pop('RedrivePolicy', None) + description.pop("Policy", None) + description.pop("RedrivePolicy", None) description = camel_dict_to_snake_dict(description) - description['policy'] = attributes.get('Policy', None) - description['redrive_policy'] = attributes.get('RedrivePolicy', None) + description["policy"] = attributes.get("Policy", None) + description["redrive_policy"] = attributes.get("RedrivePolicy", None) # Boto3 returns everything as a string, convert them back to integers/dicts if # that's what we expected. @@ -310,12 +310,12 @@ def describe_queue(client, queue_url): if value is None: continue - if key in ['policy', 'redrive_policy']: + if key in ["policy", "redrive_policy"]: policy = json.loads(value) description[key] = policy continue - if key == 'content_based_deduplication': + if key == "content_based_deduplication": try: description[key] = bool(value) except (TypeError, ValueError): @@ -331,49 +331,48 @@ def describe_queue(client, queue_url): def create_or_update_sqs_queue(client, module): - is_fifo = (module.params.get('queue_type') == 'fifo') - kms_master_key_id = module.params.get('kms_master_key_id') + is_fifo = module.params.get("queue_type") == "fifo" + kms_master_key_id = module.params.get("kms_master_key_id") queue_name = get_queue_name(module, is_fifo) result = dict( name=queue_name, - region=module.params.get('region'), + region=module.params.get("region"), changed=False, ) queue_url = get_queue_url(client, queue_name) - result['queue_url'] = queue_url + result["queue_url"] = queue_url # Create a dict() to hold attributes that will be passed to boto3 create_attributes = {} if not queue_url: if is_fifo: - create_attributes['FifoQueue'] = "True" + create_attributes["FifoQueue"] = "True" if kms_master_key_id: - create_attributes['KmsMasterKeyId'] = kms_master_key_id - result['changed'] = True + create_attributes["KmsMasterKeyId"] = kms_master_key_id + result["changed"] = True if module.check_mode: return result - queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)['QueueUrl'] + queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)["QueueUrl"] changed, arn = update_sqs_queue(module, client, queue_url) - result['changed'] |= changed - result['queue_arn'] = arn + result["changed"] |= changed + result["queue_arn"] = arn changed, tags = update_tags(client, queue_url, module) - result['changed'] |= changed - result['tags'] = tags + result["changed"] |= changed + result["tags"] = tags result.update(describe_queue(client, queue_url)) COMPATABILITY_KEYS = dict( - delay_seconds='delivery_delay', - receive_message_wait_time_seconds='receive_message_wait_time', - visibility_timeout='default_visibility_timeout', - kms_data_key_reuse_period_seconds='kms_data_key_reuse_period', + delay_seconds="delivery_delay", + receive_message_wait_time_seconds="receive_message_wait_time", + visibility_timeout="default_visibility_timeout", + kms_data_key_reuse_period_seconds="kms_data_key_reuse_period", ) for key in list(result.keys()): - # The return values changed between boto and boto3, add the old keys too # for backwards compatibility return_name = COMPATABILITY_KEYS.get(key) @@ -386,30 +385,32 @@ def create_or_update_sqs_queue(client, module): def update_sqs_queue(module, client, queue_url): check_mode = module.check_mode changed = False - existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes'] + existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"], aws_retry=True)[ + "Attributes" + ] new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True) attributes_to_set = dict() # Boto3 SQS deals with policies as strings, we want to deal with them as # dicts - if module.params.get('policy') is not None: - policy = module.params.get('policy') - current_value = existing_attributes.get('Policy', '{}') + if module.params.get("policy") is not None: + policy = module.params.get("policy") + current_value = existing_attributes.get("Policy", "{}") current_policy = json.loads(current_value) if compare_policies(current_policy, policy): - attributes_to_set['Policy'] = json.dumps(policy) + attributes_to_set["Policy"] = json.dumps(policy) changed = True - if module.params.get('redrive_policy') is not None: - policy = module.params.get('redrive_policy') - current_value = existing_attributes.get('RedrivePolicy', '{}') + if module.params.get("redrive_policy") is not None: + policy = module.params.get("redrive_policy") + current_value = existing_attributes.get("RedrivePolicy", "{}") current_policy = json.loads(current_value) if compare_policies(current_policy, policy): - attributes_to_set['RedrivePolicy'] = json.dumps(policy) + attributes_to_set["RedrivePolicy"] = json.dumps(policy) changed = True for attribute, value in existing_attributes.items(): # We handle these as a special case because they're IAM policies - if attribute in ['Policy', 'RedrivePolicy']: + if attribute in ["Policy", "RedrivePolicy"]: continue if attribute not in new_attributes.keys(): @@ -434,23 +435,19 @@ def update_sqs_queue(module, client, queue_url): if changed and not check_mode: client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True) - return changed, existing_attributes.get('queue_arn') + return changed, existing_attributes.get("queue_arn") def delete_sqs_queue(client, module): - is_fifo = (module.params.get('queue_type') == 'fifo') + is_fifo = module.params.get("queue_type") == "fifo" queue_name = get_queue_name(module, is_fifo) - result = dict( - name=queue_name, - region=module.params.get('region'), - changed=False - ) + result = dict(name=queue_name, region=module.params.get("region"), changed=False) queue_url = get_queue_url(client, queue_name) if not queue_url: return result - result['changed'] = bool(queue_url) + result["changed"] = bool(queue_url) if not module.check_mode: AWSRetry.jittered_backoff()(client.delete_queue)(QueueUrl=queue_url) @@ -458,13 +455,13 @@ def delete_sqs_queue(client, module): def update_tags(client, queue_url, module): - new_tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') + new_tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") if new_tags is None: return False, {} try: - existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)['Tags'] + existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError) as e: existing_tags = {} @@ -475,7 +472,7 @@ def update_tags(client, queue_url, module): client.untag_queue(QueueUrl=queue_url, TagKeys=tags_to_remove, aws_retry=True) if tags_to_add: client.tag_queue(QueueUrl=queue_url, Tags=tags_to_add) - existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get('Tags', {}) + existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get("Tags", {}) else: existing_tags = new_tags @@ -484,41 +481,40 @@ def update_tags(client, queue_url, module): def main(): - argument_spec = dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - name=dict(type='str', required=True), - queue_type=dict(type='str', default='standard', choices=['standard', 'fifo']), - delay_seconds=dict(type='int', aliases=['delivery_delay']), - maximum_message_size=dict(type='int'), - message_retention_period=dict(type='int'), - policy=dict(type='dict'), - receive_message_wait_time_seconds=dict(type='int', aliases=['receive_message_wait_time']), - redrive_policy=dict(type='dict'), - visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']), - kms_master_key_id=dict(type='str'), - fifo_throughput_limit=dict(type='str', choices=["perQueue", "perMessageGroupId"]), - deduplication_scope=dict(type='str', choices=['queue', 'messageGroup']), - kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period'], no_log=False), - content_based_deduplication=dict(type='bool'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + name=dict(type="str", required=True), + queue_type=dict(type="str", default="standard", choices=["standard", "fifo"]), + delay_seconds=dict(type="int", aliases=["delivery_delay"]), + maximum_message_size=dict(type="int"), + message_retention_period=dict(type="int"), + policy=dict(type="dict"), + receive_message_wait_time_seconds=dict(type="int", aliases=["receive_message_wait_time"]), + redrive_policy=dict(type="dict"), + visibility_timeout=dict(type="int", aliases=["default_visibility_timeout"]), + kms_master_key_id=dict(type="str"), + fifo_throughput_limit=dict(type="str", choices=["perQueue", "perMessageGroupId"]), + deduplication_scope=dict(type="str", choices=["queue", "messageGroup"]), + kms_data_key_reuse_period_seconds=dict(type="int", aliases=["kms_data_key_reuse_period"], no_log=False), + content_based_deduplication=dict(type="bool"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params.get('state') - retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue']) + state = module.params.get("state") + retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=["AWS.SimpleQueueService.NonExistentQueue"]) try: - client = module.client('sqs', retry_decorator=retry_decorator) - if state == 'present': + client = module.client("sqs", retry_decorator=retry_decorator) + if state == "present": result = create_or_update_sqs_queue(client, module) - elif state == 'absent': + elif state == "absent": result = delete_sqs_queue(client, module) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to control sqs queue') + module.fail_json_aws(e, msg="Failed to control sqs queue") else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ssm_inventory_info.py b/plugins/modules/ssm_inventory_info.py index 4242596f128..c5b84909738 100644 --- a/plugins/modules/ssm_inventory_info.py +++ b/plugins/modules/ssm_inventory_info.py @@ -80,15 +80,9 @@ def get_ssm_inventory(connection, filters): def execute_module(module, connection): - instance_id = module.params.get("instance_id") try: - filters = [ - { - "Key": "AWS:InstanceInformation.InstanceId", - "Values": [instance_id] - } - ] + filters = [{"Key": "AWS:InstanceInformation.InstanceId", "Values": [instance_id]}] response = get_ssm_inventory(connection, filters) entities = response.get("Entities", []) diff --git a/plugins/modules/ssm_parameter.py b/plugins/modules/ssm_parameter.py index 493d2b294c4..aefafca009c 100644 --- a/plugins/modules/ssm_parameter.py +++ b/plugins/modules/ssm_parameter.py @@ -264,7 +264,7 @@ class ParameterWaiterFactory(BaseWaiterFactory): def __init__(self, module): - client = module.client('ssm') + client = module.client("ssm") super(ParameterWaiterFactory, self).__init__(module, client) @property @@ -272,22 +272,24 @@ def _waiter_model_data(self): data = super(ParameterWaiterFactory, self)._waiter_model_data ssm_data = dict( parameter_exists=dict( - operation='DescribeParameters', - delay=1, maxAttempts=20, + operation="DescribeParameters", + delay=1, + maxAttempts=20, acceptors=[ - dict(state='retry', matcher='error', expected='ParameterNotFound'), - dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) == `0`'), - dict(state='success', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'), - ] + dict(state="retry", matcher="error", expected="ParameterNotFound"), + dict(state="retry", matcher="path", expected=True, argument="length(Parameters[].Name) == `0`"), + dict(state="success", matcher="path", expected=True, argument="length(Parameters[].Name) > `0`"), + ], ), parameter_deleted=dict( - operation='DescribeParameters', - delay=1, maxAttempts=20, + operation="DescribeParameters", + delay=1, + maxAttempts=20, acceptors=[ - dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'), - dict(state='success', matcher='path', expected=True, argument='length(Parameters[]) == `0`'), - dict(state='success', matcher='error', expected='ParameterNotFound'), - ] + dict(state="retry", matcher="path", expected=True, argument="length(Parameters[].Name) > `0`"), + dict(state="success", matcher="path", expected=True, argument="length(Parameters[]) == `0`"), + dict(state="success", matcher="error", expected="ParameterNotFound"), + ], ), ) data.update(ssm_data) @@ -298,10 +300,10 @@ def _wait_exists(client, module, name): if module.check_mode: return wf = ParameterWaiterFactory(module) - waiter = wf.get_waiter('parameter_exists') + waiter = wf.get_waiter("parameter_exists") try: waiter.wait( - ParameterFilters=[{'Key': 'Name', "Values": [name]}], + ParameterFilters=[{"Key": "Name", "Values": [name]}], ) except botocore.exceptions.WaiterError: module.warn("Timeout waiting for parameter to exist") @@ -316,7 +318,7 @@ def _wait_updated(client, module, name, version): for x in range(1, 10): try: parameter = describe_parameter(client, module, ParameterFilters=[{"Key": "Name", "Values": [name]}]) - if parameter.get('Version', 0) > version: + if parameter.get("Version", 0) > version: return except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe parameter while waiting for update") @@ -327,10 +329,10 @@ def _wait_deleted(client, module, name): if module.check_mode: return wf = ParameterWaiterFactory(module) - waiter = wf.get_waiter('parameter_deleted') + waiter = wf.get_waiter("parameter_deleted") try: waiter.wait( - ParameterFilters=[{'Key': 'Name', "Values": [name]}], + ParameterFilters=[{"Key": "Name", "Values": [name]}], ) except botocore.exceptions.WaiterError: module.warn("Timeout waiting for parameter to exist") @@ -340,24 +342,27 @@ def _wait_deleted(client, module, name): def tag_parameter(client, module, parameter_name, tags): try: - return client.add_tags_to_resource(aws_retry=True, ResourceType='Parameter', - ResourceId=parameter_name, Tags=tags) + return client.add_tags_to_resource( + aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name, Tags=tags + ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Failed to add tag(s) to parameter") def untag_parameter(client, module, parameter_name, tag_keys): try: - return client.remove_tags_from_resource(aws_retry=True, ResourceType='Parameter', - ResourceId=parameter_name, TagKeys=tag_keys) + return client.remove_tags_from_resource( + aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name, TagKeys=tag_keys + ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Failed to remove tag(s) from parameter") def get_parameter_tags(client, module, parameter_name): try: - tags = client.list_tags_for_resource(aws_retry=True, ResourceType='Parameter', - ResourceId=parameter_name)['TagList'] + tags = client.list_tags_for_resource(aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name)[ + "TagList" + ] tags_dict = boto3_tag_list_to_ansible_dict(tags) return tags_dict except (BotoCoreError, ClientError) as e: @@ -372,14 +377,12 @@ def update_parameter_tags(client, module, parameter_name, supplied_tags): return False, response current_tags = get_parameter_tags(client, module, parameter_name) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags, - module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags, module.params.get("purge_tags")) if tags_to_add: if module.check_mode: return True, response - response = tag_parameter(client, module, parameter_name, - ansible_dict_to_boto3_tag_list(tags_to_add)) + response = tag_parameter(client, module, parameter_name, ansible_dict_to_boto3_tag_list(tags_to_add)) changed = True if tags_to_remove: if module.check_mode: @@ -407,16 +410,16 @@ def update_parameter(client, module, **args): @AWSRetry.jittered_backoff() def describe_parameter(client, module, **args): - paginator = client.get_paginator('describe_parameters') + paginator = client.get_paginator("describe_parameters") existing_parameter = paginator.paginate(**args).build_full_result() - if not existing_parameter['Parameters']: + if not existing_parameter["Parameters"]: return None - tags_dict = get_parameter_tags(client, module, module.params.get('name')) - existing_parameter['Parameters'][0]['tags'] = tags_dict + tags_dict = get_parameter_tags(client, module, module.params.get("name")) + existing_parameter["Parameters"][0]["tags"] = tags_dict - return existing_parameter['Parameters'][0] + return existing_parameter["Parameters"][0] def create_update_parameter(client, module): @@ -424,82 +427,78 @@ def create_update_parameter(client, module): existing_parameter = None response = {} - args = dict( - Name=module.params.get('name'), - Type=module.params.get('string_type'), - Tier=module.params.get('tier') - ) + args = dict(Name=module.params.get("name"), Type=module.params.get("string_type"), Tier=module.params.get("tier")) - if (module.params.get('overwrite_value') in ("always", "changed")): + if module.params.get("overwrite_value") in ("always", "changed"): args.update(Overwrite=True) else: args.update(Overwrite=False) - if module.params.get('value') is not None: - args.update(Value=module.params.get('value')) + if module.params.get("value") is not None: + args.update(Value=module.params.get("value")) - if module.params.get('description'): - args.update(Description=module.params.get('description')) + if module.params.get("description"): + args.update(Description=module.params.get("description")) - if module.params.get('string_type') == 'SecureString': - args.update(KeyId=module.params.get('key_id')) + if module.params.get("string_type") == "SecureString": + args.update(KeyId=module.params.get("key_id")) try: - existing_parameter = client.get_parameter(aws_retry=True, Name=args['Name'], WithDecryption=True) + existing_parameter = client.get_parameter(aws_retry=True, Name=args["Name"], WithDecryption=True) except botocore.exceptions.ClientError: pass except botocore.exceptions.BotoCoreError as e: module.fail_json_aws(e, msg="fetching parameter") if existing_parameter: - original_version = existing_parameter['Parameter']['Version'] - if 'Value' not in args: - args['Value'] = existing_parameter['Parameter']['Value'] + original_version = existing_parameter["Parameter"]["Version"] + if "Value" not in args: + args["Value"] = existing_parameter["Parameter"]["Value"] - if (module.params.get('overwrite_value') == 'always'): + if module.params.get("overwrite_value") == "always": (changed, response) = update_parameter(client, module, **args) - elif (module.params.get('overwrite_value') == 'changed'): - if existing_parameter['Parameter']['Type'] != args['Type']: + elif module.params.get("overwrite_value") == "changed": + if existing_parameter["Parameter"]["Type"] != args["Type"]: (changed, response) = update_parameter(client, module, **args) - elif existing_parameter['Parameter']['Value'] != args['Value']: + elif existing_parameter["Parameter"]["Value"] != args["Value"]: (changed, response) = update_parameter(client, module, **args) - elif args.get('Description'): + elif args.get("Description"): # Description field not available from get_parameter function so get it from describe_parameters try: describe_existing_parameter = describe_parameter( - client, module, - ParameterFilters=[{"Key": "Name", "Values": [args['Name']]}]) + client, module, ParameterFilters=[{"Key": "Name", "Values": [args["Name"]]}] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="getting description value") - if describe_existing_parameter.get('Description') != args['Description']: + if describe_existing_parameter.get("Description") != args["Description"]: (changed, response) = update_parameter(client, module, **args) if changed: - _wait_updated(client, module, module.params.get('name'), original_version) + _wait_updated(client, module, module.params.get("name"), original_version) # Handle tag updates for existing parameters - if module.params.get('overwrite_value') != 'never': + if module.params.get("overwrite_value") != "never": tags_changed, tags_response = update_parameter_tags( - client, module, existing_parameter['Parameter']['Name'], - module.params.get('tags')) + client, module, existing_parameter["Parameter"]["Name"], module.params.get("tags") + ) changed = changed or tags_changed if tags_response: - response['tag_updates'] = tags_response + response["tag_updates"] = tags_response else: # Add tags in initial creation request - if module.params.get('tags'): - args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get('tags'))) + if module.params.get("tags"): + args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get("tags"))) # Overwrite=True conflicts with tags and is not needed for new param args.update(Overwrite=False) (changed, response) = update_parameter(client, module, **args) - _wait_exists(client, module, module.params.get('name')) + _wait_exists(client, module, module.params.get("name")) return changed, response @@ -508,8 +507,8 @@ def delete_parameter(client, module): response = {} try: - existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get('name'), WithDecryption=True) - except is_boto3_error_code('ParameterNotFound'): + existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get("name"), WithDecryption=True) + except is_boto3_error_code("ParameterNotFound"): return False, {} except botocore.exceptions.ClientError: # If we can't describe the parameter we may still be able to delete it @@ -523,23 +522,23 @@ def delete_parameter(client, module): return True, {} try: - response = client.delete_parameter( - aws_retry=True, - Name=module.params.get('name') - ) - except is_boto3_error_code('ParameterNotFound'): + response = client.delete_parameter(aws_retry=True, Name=module.params.get("name")) + except is_boto3_error_code("ParameterNotFound"): return False, {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="deleting parameter") - _wait_deleted(client, module, module.params.get('name')) + _wait_deleted(client, module, module.params.get("name")) return True, response def setup_client(module): retry_decorator = AWSRetry.jittered_backoff() - connection = module.client('ssm', retry_decorator=retry_decorator) + connection = module.client("ssm", retry_decorator=retry_decorator) return connection @@ -548,14 +547,14 @@ def setup_module_object(): name=dict(required=True), description=dict(), value=dict(required=False, no_log=True), - state=dict(default='present', choices=['present', 'absent']), - string_type=dict(default='String', choices=['String', 'StringList', 'SecureString'], aliases=['type']), - decryption=dict(default=True, type='bool'), + state=dict(default="present", choices=["present", "absent"]), + string_type=dict(default="String", choices=["String", "StringList", "SecureString"], aliases=["type"]), + decryption=dict(default=True, type="bool"), key_id=dict(default="alias/aws/ssm"), - overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']), - tier=dict(default='Standard', choices=['Standard', 'Advanced', 'Intelligent-Tiering']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + overwrite_value=dict(default="changed", choices=["never", "changed", "always"]), + tier=dict(default="Standard", choices=["Standard", "Advanced", "Intelligent-Tiering"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) return AnsibleAWSModule( @@ -566,7 +565,7 @@ def setup_module_object(): def main(): module = setup_module_object() - state = module.params.get('state') + state = module.params.get("state") client = setup_client(module) invocations = { @@ -579,18 +578,17 @@ def main(): try: parameter_metadata = describe_parameter( - client, module, - ParameterFilters=[{"Key": "Name", "Values": [module.params.get('name')]}]) - except is_boto3_error_code('ParameterNotFound'): + client, module, ParameterFilters=[{"Key": "Name", "Values": [module.params.get("name")]}] + ) + except is_boto3_error_code("ParameterNotFound"): return False, {} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="to describe parameter") if parameter_metadata: - result['parameter_metadata'] = camel_dict_to_snake_dict(parameter_metadata, - ignore_list=['tags']) + result["parameter_metadata"] = camel_dict_to_snake_dict(parameter_metadata, ignore_list=["tags"]) module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/stepfunctions_state_machine.py b/plugins/modules/stepfunctions_state_machine.py index 0f4b3ec1397..4bbd1503ab8 100644 --- a/plugins/modules/stepfunctions_state_machine.py +++ b/plugins/modules/stepfunctions_state_machine.py @@ -101,36 +101,36 @@ def manage_state_machine(state, sfn_client, module): state_machine_arn = get_state_machine_arn(sfn_client, module) - if state == 'present': + if state == "present": if state_machine_arn is None: create(sfn_client, module) else: update(state_machine_arn, sfn_client, module) - elif state == 'absent': + elif state == "absent": if state_machine_arn is not None: remove(state_machine_arn, sfn_client, module) - check_mode(module, msg='State is up-to-date.') + check_mode(module, msg="State is up-to-date.") module.exit_json(changed=False, state_machine_arn=state_machine_arn) def create(sfn_client, module): - check_mode(module, msg='State machine would be created.', changed=True) + check_mode(module, msg="State machine would be created.", changed=True) - tags = module.params.get('tags') - sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else [] + tags = module.params.get("tags") + sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name="key", tag_value_key_name="value") if tags else [] state_machine = sfn_client.create_state_machine( - name=module.params.get('name'), - definition=module.params.get('definition'), - roleArn=module.params.get('role_arn'), - tags=sfn_tags + name=module.params.get("name"), + definition=module.params.get("definition"), + roleArn=module.params.get("role_arn"), + tags=sfn_tags, ) - module.exit_json(changed=True, state_machine_arn=state_machine.get('stateMachineArn')) + module.exit_json(changed=True, state_machine_arn=state_machine.get("stateMachineArn")) def remove(state_machine_arn, sfn_client, module): - check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True) + check_mode(module, msg="State machine would be deleted: {0}".format(state_machine_arn), changed=True) sfn_client.delete_state_machine(stateMachineArn=state_machine_arn) module.exit_json(changed=True, state_machine_arn=state_machine_arn) @@ -140,29 +140,28 @@ def update(state_machine_arn, sfn_client, module): tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module) if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove: - check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True) + check_mode(module, msg="State machine would be updated: {0}".format(state_machine_arn), changed=True) sfn_client.update_state_machine( stateMachineArn=state_machine_arn, - definition=module.params.get('definition'), - roleArn=module.params.get('role_arn') - ) - sfn_client.untag_resource( - resourceArn=state_machine_arn, - tagKeys=tags_to_remove + definition=module.params.get("definition"), + roleArn=module.params.get("role_arn"), ) + sfn_client.untag_resource(resourceArn=state_machine_arn, tagKeys=tags_to_remove) sfn_client.tag_resource( resourceArn=state_machine_arn, - tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value') + tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name="key", tag_value_key_name="value"), ) module.exit_json(changed=True, state_machine_arn=state_machine_arn) def compare_tags(state_machine_arn, sfn_client, module): - new_tags = module.params.get('tags') - current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags') - return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get('purge_tags')) + new_tags = module.params.get("tags") + current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get("tags") + return compare_aws_tags( + boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get("purge_tags") + ) def params_changed(state_machine_arn, sfn_client, module): @@ -171,7 +170,9 @@ def params_changed(state_machine_arn, sfn_client, module): from the existing state machine parameters. """ current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn) - return current.get('definition') != module.params.get('definition') or current.get('roleArn') != module.params.get('role_arn') + return current.get("definition") != module.params.get("definition") or current.get("roleArn") != module.params.get( + "role_arn" + ) def get_state_machine_arn(sfn_client, module): @@ -179,42 +180,42 @@ def get_state_machine_arn(sfn_client, module): Finds the state machine ARN based on the name parameter. Returns None if there is no state machine with this name. """ - target_name = module.params.get('name') - all_state_machines = sfn_client.list_state_machines(aws_retry=True).get('stateMachines') + target_name = module.params.get("name") + all_state_machines = sfn_client.list_state_machines(aws_retry=True).get("stateMachines") for state_machine in all_state_machines: - if state_machine.get('name') == target_name: - return state_machine.get('stateMachineArn') + if state_machine.get("name") == target_name: + return state_machine.get("stateMachineArn") -def check_mode(module, msg='', changed=False): +def check_mode(module, msg="", changed=False): if module.check_mode: module.exit_json(changed=changed, output=msg) def main(): module_args = dict( - name=dict(type='str', required=True), - definition=dict(type='json'), - role_arn=dict(type='str'), - state=dict(choices=['present', 'absent'], default='present'), - tags=dict(default=None, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + name=dict(type="str", required=True), + definition=dict(type="json"), + role_arn=dict(type="str"), + state=dict(choices=["present", "absent"], default="present"), + tags=dict(default=None, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=module_args, - required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])], - supports_check_mode=True + required_if=[("state", "present", ["role_arn"]), ("state", "present", ["definition"])], + supports_check_mode=True, ) - sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5)) - state = module.params.get('state') + sfn_client = module.client("stepfunctions", retry_decorator=AWSRetry.jittered_backoff(retries=5)) + state = module.params.get("state") try: manage_state_machine(state, sfn_client, module) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to manage state machine') + module.fail_json_aws(e, msg="Failed to manage state machine") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/stepfunctions_state_machine_execution.py b/plugins/modules/stepfunctions_state_machine_execution.py index 0b6858fbf42..b7a9f7efba8 100644 --- a/plugins/modules/stepfunctions_state_machine_execution.py +++ b/plugins/modules/stepfunctions_state_machine_execution.py @@ -100,95 +100,90 @@ def start_execution(module, sfn_client): - ''' + """ start_execution uses execution name to determine if a previous execution already exists. If an execution by the provided name exists, call client.start_execution will not be called. - ''' + """ - state_machine_arn = module.params.get('state_machine_arn') - name = module.params.get('name') - execution_input = module.params.get('execution_input') + state_machine_arn = module.params.get("state_machine_arn") + name = module.params.get("name") + execution_input = module.params.get("execution_input") try: # list_executions is eventually consistent - page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn) + page_iterators = sfn_client.get_paginator("list_executions").paginate(stateMachineArn=state_machine_arn) - for execution in page_iterators.build_full_result()['executions']: - if name == execution['name']: - check_mode(module, msg='State machine execution already exists.', changed=False) + for execution in page_iterators.build_full_result()["executions"]: + if name == execution["name"]: + check_mode(module, msg="State machine execution already exists.", changed=False) module.exit_json(changed=False) - check_mode(module, msg='State machine execution would be started.', changed=True) - res_execution = sfn_client.start_execution( - stateMachineArn=state_machine_arn, - name=name, - input=execution_input - ) - except is_boto3_error_code('ExecutionAlreadyExists'): + check_mode(module, msg="State machine execution would be started.", changed=True) + res_execution = sfn_client.start_execution(stateMachineArn=state_machine_arn, name=name, input=execution_input) + except is_boto3_error_code("ExecutionAlreadyExists"): # this will never be executed anymore module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to start execution.") module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution)) def stop_execution(module, sfn_client): - - cause = module.params.get('cause') - error = module.params.get('error') - execution_arn = module.params.get('execution_arn') + cause = module.params.get("cause") + error = module.params.get("error") + execution_arn = module.params.get("execution_arn") try: # describe_execution is eventually consistent - execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status'] - if execution_status != 'RUNNING': - check_mode(module, msg='State machine execution is not running.', changed=False) + execution_status = sfn_client.describe_execution(executionArn=execution_arn)["status"] + if execution_status != "RUNNING": + check_mode(module, msg="State machine execution is not running.", changed=False) module.exit_json(changed=False) - check_mode(module, msg='State machine execution would be stopped.', changed=True) - res = sfn_client.stop_execution( - executionArn=execution_arn, - cause=cause, - error=error - ) + check_mode(module, msg="State machine execution would be stopped.", changed=True) + res = sfn_client.stop_execution(executionArn=execution_arn, cause=cause, error=error) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to stop execution.") module.exit_json(changed=True, **camel_dict_to_snake_dict(res)) -def check_mode(module, msg='', changed=False): +def check_mode(module, msg="", changed=False): if module.check_mode: module.exit_json(changed=changed, output=msg) def main(): module_args = dict( - action=dict(choices=['start', 'stop'], default='start'), - name=dict(type='str'), - execution_input=dict(type='json', default={}), - state_machine_arn=dict(type='str'), - cause=dict(type='str', default=''), - error=dict(type='str', default=''), - execution_arn=dict(type='str') + action=dict(choices=["start", "stop"], default="start"), + name=dict(type="str"), + execution_input=dict(type="json", default={}), + state_machine_arn=dict(type="str"), + cause=dict(type="str", default=""), + error=dict(type="str", default=""), + execution_arn=dict(type="str"), ) module = AnsibleAWSModule( argument_spec=module_args, - required_if=[('action', 'start', ['name', 'state_machine_arn']), - ('action', 'stop', ['execution_arn']), - ], - supports_check_mode=True + required_if=[ + ("action", "start", ["name", "state_machine_arn"]), + ("action", "stop", ["execution_arn"]), + ], + supports_check_mode=True, ) - sfn_client = module.client('stepfunctions') + sfn_client = module.client("stepfunctions") - action = module.params.get('action') + action = module.params.get("action") if action == "start": start_execution(module, sfn_client) else: stop_execution(module, sfn_client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/storagegateway_info.py b/plugins/modules/storagegateway_info.py index 854d1cbb0d8..5ff72399786 100644 --- a/plugins/modules/storagegateway_info.py +++ b/plugins/modules/storagegateway_info.py @@ -187,21 +187,21 @@ class SGWInformationManager(object): def __init__(self, client, module): self.client = client self.module = module - self.name = self.module.params.get('name') + self.name = self.module.params.get("name") def fetch(self): gateways = self.list_gateways() for gateway in gateways: - if self.module.params.get('gather_local_disks'): + if self.module.params.get("gather_local_disks"): self.list_local_disks(gateway) # File share gateway - if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'): + if gateway["gateway_type"] == "FILE_S3" and self.module.params.get("gather_file_shares"): self.list_gateway_file_shares(gateway) # Volume tape gateway - elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'): + elif gateway["gateway_type"] == "VTL" and self.module.params.get("gather_tapes"): self.list_gateway_vtl(gateway) # iSCSI gateway - elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'): + elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get("gather_volumes"): self.list_gateway_volumes(gateway) self.module.exit_json(gateways=gateways) @@ -209,12 +209,13 @@ def fetch(self): """ List all storage gateways for the AWS endpoint. """ + def list_gateways(self): try: - paginator = self.client.get_paginator('list_gateways') + paginator = self.client.get_paginator("list_gateways") response = paginator.paginate( PaginationConfig={ - 'PageSize': 100, + "PageSize": 100, } ).build_full_result() @@ -231,6 +232,7 @@ def list_gateways(self): Read file share objects from AWS API response. Drop the gateway_arn attribute from response, as it will be duplicate with parent object. """ + @staticmethod def _read_gateway_fileshare_response(fileshares, aws_reponse): for share in aws_reponse["FileShareInfoList"]: @@ -244,22 +246,16 @@ def _read_gateway_fileshare_response(fileshares, aws_reponse): """ List file shares attached to AWS storage gateway when in S3 mode. """ + def list_gateway_file_shares(self, gateway): try: - response = self.client.list_file_shares( - GatewayARN=gateway["gateway_arn"], - Limit=100 - ) + response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Limit=100) gateway["file_shares"] = [] marker = self._read_gateway_fileshare_response(gateway["file_shares"], response) while marker is not None: - response = self.client.list_file_shares( - GatewayARN=gateway["gateway_arn"], - Marker=marker, - Limit=100 - ) + response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Marker=marker, Limit=100) marker = self._read_gateway_fileshare_response(gateway["file_shares"], response) except (BotoCoreError, ClientError) as e: @@ -268,10 +264,13 @@ def list_gateway_file_shares(self, gateway): """ List storage gateway local disks """ + def list_local_disks(self, gateway): try: - gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in - self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']] + gateway["local_disks"] = [ + camel_dict_to_snake_dict(disk) + for disk in self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])["Disks"] + ] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks") @@ -279,6 +278,7 @@ def list_local_disks(self, gateway): Read tape objects from AWS API response. Drop the gateway_arn attribute from response, as it will be duplicate with parent object. """ + @staticmethod def _read_gateway_tape_response(tapes, aws_response): for tape in aws_response["TapeInfos"]: @@ -292,20 +292,16 @@ def _read_gateway_tape_response(tapes, aws_response): """ List VTL & VTS attached to AWS storage gateway in VTL mode """ + def list_gateway_vtl(self, gateway): try: - response = self.client.list_tapes( - Limit=100 - ) + response = self.client.list_tapes(Limit=100) gateway["tapes"] = [] marker = self._read_gateway_tape_response(gateway["tapes"], response) while marker is not None: - response = self.client.list_tapes( - Marker=marker, - Limit=100 - ) + response = self.client.list_tapes(Marker=marker, Limit=100) marker = self._read_gateway_tape_response(gateway["tapes"], response) except (BotoCoreError, ClientError) as e: @@ -314,14 +310,15 @@ def list_gateway_vtl(self, gateway): """ List volumes attached to AWS storage gateway in CACHED or STORAGE mode """ + def list_gateway_volumes(self, gateway): try: - paginator = self.client.get_paginator('list_volumes') + paginator = self.client.get_paginator("list_volumes") response = paginator.paginate( GatewayARN=gateway["gateway_arn"], PaginationConfig={ - 'PageSize': 100, - } + "PageSize": 100, + }, ).build_full_result() gateway["volumes"] = [] @@ -339,10 +336,10 @@ def list_gateway_volumes(self, gateway): def main(): argument_spec = dict( - gather_local_disks=dict(type='bool', default=True), - gather_tapes=dict(type='bool', default=True), - gather_file_shares=dict(type='bool', default=True), - gather_volumes=dict(type='bool', default=True) + gather_local_disks=dict(type="bool", default=True), + gather_tapes=dict(type="bool", default=True), + gather_file_shares=dict(type="bool", default=True), + gather_volumes=dict(type="bool", default=True), ) module = AnsibleAWSModule( @@ -350,13 +347,13 @@ def main(): supports_check_mode=True, ) - client = module.client('storagegateway') + client = module.client("storagegateway") if client is None: # this should never happen - module.fail_json(msg='Unknown error, failed to create storagegateway client, no information available.') + module.fail_json(msg="Unknown error, failed to create storagegateway client, no information available.") SGWInformationManager(client, module).fetch() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sts_assume_role.py b/plugins/modules/sts_assume_role.py index c53bfa9c978..4a4860657cf 100644 --- a/plugins/modules/sts_assume_role.py +++ b/plugins/modules/sts_assume_role.py @@ -112,15 +112,14 @@ def _parse_response(response): - credentials = response.get('Credentials', {}) - user = response.get('AssumedRoleUser', {}) + credentials = response.get("Credentials", {}) + user = response.get("AssumedRoleUser", {}) sts_cred = { - 'access_key': credentials.get('AccessKeyId'), - 'secret_key': credentials.get('SecretAccessKey'), - 'session_token': credentials.get('SessionToken'), - 'expiration': credentials.get('Expiration') - + "access_key": credentials.get("AccessKeyId"), + "secret_key": credentials.get("SecretAccessKey"), + "session_token": credentials.get("SessionToken"), + "expiration": credentials.get("Expiration"), } sts_user = camel_dict_to_snake_dict(user) return sts_cred, sts_user @@ -128,13 +127,13 @@ def _parse_response(response): def assume_role_policy(connection, module): params = { - 'RoleArn': module.params.get('role_arn'), - 'RoleSessionName': module.params.get('role_session_name'), - 'Policy': module.params.get('policy'), - 'DurationSeconds': module.params.get('duration_seconds'), - 'ExternalId': module.params.get('external_id'), - 'SerialNumber': module.params.get('mfa_serial_number'), - 'TokenCode': module.params.get('mfa_token') + "RoleArn": module.params.get("role_arn"), + "RoleSessionName": module.params.get("role_session_name"), + "Policy": module.params.get("policy"), + "DurationSeconds": module.params.get("duration_seconds"), + "ExternalId": module.params.get("external_id"), + "SerialNumber": module.params.get("mfa_serial_number"), + "TokenCode": module.params.get("mfa_token"), } changed = False @@ -154,19 +153,19 @@ def main(): argument_spec = dict( role_arn=dict(required=True), role_session_name=dict(required=True), - duration_seconds=dict(required=False, default=None, type='int'), + duration_seconds=dict(required=False, default=None, type="int"), external_id=dict(required=False, default=None), policy=dict(required=False, default=None), mfa_serial_number=dict(required=False, default=None), - mfa_token=dict(required=False, default=None, no_log=True) + mfa_token=dict(required=False, default=None, no_log=True), ) module = AnsibleAWSModule(argument_spec=argument_spec) - connection = module.client('sts') + connection = module.client("sts") assume_role_policy(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sts_session_token.py b/plugins/modules/sts_session_token.py index c780097be61..8656a96fc3c 100644 --- a/plugins/modules/sts_session_token.py +++ b/plugins/modules/sts_session_token.py @@ -84,31 +84,31 @@ def normalize_credentials(credentials): - access_key = credentials.get('AccessKeyId', None) - secret_key = credentials.get('SecretAccessKey', None) - session_token = credentials.get('SessionToken', None) - expiration = credentials.get('Expiration', None) + access_key = credentials.get("AccessKeyId", None) + secret_key = credentials.get("SecretAccessKey", None) + session_token = credentials.get("SessionToken", None) + expiration = credentials.get("Expiration", None) return { - 'access_key': access_key, - 'secret_key': secret_key, - 'session_token': session_token, - 'expiration': expiration + "access_key": access_key, + "secret_key": secret_key, + "session_token": session_token, + "expiration": expiration, } def get_session_token(connection, module): - duration_seconds = module.params.get('duration_seconds') - mfa_serial_number = module.params.get('mfa_serial_number') - mfa_token = module.params.get('mfa_token') + duration_seconds = module.params.get("duration_seconds") + mfa_serial_number = module.params.get("mfa_serial_number") + mfa_token = module.params.get("mfa_token") changed = False args = {} if duration_seconds is not None: - args['DurationSeconds'] = duration_seconds + args["DurationSeconds"] = duration_seconds if mfa_serial_number is not None: - args['SerialNumber'] = mfa_serial_number + args["SerialNumber"] = mfa_serial_number if mfa_token is not None: - args['TokenCode'] = mfa_token + args["TokenCode"] = mfa_token try: response = connection.get_session_token(**args) @@ -116,13 +116,13 @@ def get_session_token(connection, module): except ClientError as e: module.fail_json(msg=e) - credentials = normalize_credentials(response.get('Credentials', {})) + credentials = normalize_credentials(response.get("Credentials", {})) module.exit_json(changed=changed, sts_creds=credentials) def main(): argument_spec = dict( - duration_seconds=dict(required=False, default=None, type='int'), + duration_seconds=dict(required=False, default=None, type="int"), mfa_serial_number=dict(required=False, default=None), mfa_token=dict(required=False, default=None, no_log=True), ) @@ -130,12 +130,12 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec) try: - connection = module.client('sts') + connection = module.client("sts") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") get_session_token(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/waf_condition.py b/plugins/modules/waf_condition.py index 6e1911323c9..efbb17e2cf8 100644 --- a/plugins/modules/waf_condition.py +++ b/plugins/modules/waf_condition.py @@ -418,73 +418,79 @@ class Condition(object): - def __init__(self, client, module): self.client = client self.module = module - self.type = module.params['type'] - self.method_suffix = MATCH_LOOKUP[self.type]['method'] - self.conditionset = MATCH_LOOKUP[self.type]['conditionset'] - self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's' - self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id' - self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple'] - self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's' - self.conditiontype = MATCH_LOOKUP[self.type]['type'] + self.type = module.params["type"] + self.method_suffix = MATCH_LOOKUP[self.type]["method"] + self.conditionset = MATCH_LOOKUP[self.type]["conditionset"] + self.conditionsets = MATCH_LOOKUP[self.type]["conditionset"] + "s" + self.conditionsetid = MATCH_LOOKUP[self.type]["conditionset"] + "Id" + self.conditiontuple = MATCH_LOOKUP[self.type]["conditiontuple"] + self.conditiontuples = MATCH_LOOKUP[self.type]["conditiontuple"] + "s" + self.conditiontype = MATCH_LOOKUP[self.type]["type"] def format_for_update(self, condition_set_id): # Prep kwargs kwargs = dict() - kwargs['Updates'] = list() + kwargs["Updates"] = list() - for filtr in self.module.params.get('filters'): + for filtr in self.module.params.get("filters"): # Only for ip_set - if self.type == 'ip': + if self.type == "ip": # there might be a better way of detecting an IPv6 address - if ':' in filtr.get('ip_address'): - ip_type = 'IPV6' + if ":" in filtr.get("ip_address"): + ip_type = "IPV6" else: - ip_type = 'IPV4' - condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')} + ip_type = "IPV4" + condition_insert = {"Type": ip_type, "Value": filtr.get("ip_address")} # Specific for geo_match_set - if self.type == 'geo': - condition_insert = dict(Type='Country', Value=filtr.get('country')) + if self.type == "geo": + condition_insert = dict(Type="Country", Value=filtr.get("country")) # Common For everything but ip_set and geo_match_set - if self.type not in ('ip', 'geo'): - - condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()), - TextTransformation=filtr.get('transformation', 'none').upper()) - - if filtr.get('field_to_match').upper() == "HEADER": - if filtr.get('header'): - condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower() + if self.type not in ("ip", "geo"): + condition_insert = dict( + FieldToMatch=dict(Type=filtr.get("field_to_match").upper()), + TextTransformation=filtr.get("transformation", "none").upper(), + ) + + if filtr.get("field_to_match").upper() == "HEADER": + if filtr.get("header"): + condition_insert["FieldToMatch"]["Data"] = filtr.get("header").lower() else: self.module.fail_json(msg=str("DATA required when HEADER requested")) # Specific for byte_match_set - if self.type == 'byte': - condition_insert['TargetString'] = filtr.get('target_string') - condition_insert['PositionalConstraint'] = filtr.get('position') + if self.type == "byte": + condition_insert["TargetString"] = filtr.get("target_string") + condition_insert["PositionalConstraint"] = filtr.get("position") # Specific for size_constraint_set - if self.type == 'size': - condition_insert['ComparisonOperator'] = filtr.get('comparison') - condition_insert['Size'] = filtr.get('size') + if self.type == "size": + condition_insert["ComparisonOperator"] = filtr.get("comparison") + condition_insert["Size"] = filtr.get("size") # Specific for regex_match_set - if self.type == 'regex': - condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId'] + if self.type == "regex": + condition_insert["RegexPatternSetId"] = self.ensure_regex_pattern_present(filtr.get("regex_pattern"))[ + "RegexPatternSetId" + ] - kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert}) + kwargs["Updates"].append({"Action": "INSERT", self.conditiontuple: condition_insert}) kwargs[self.conditionsetid] = condition_set_id return kwargs def format_for_deletion(self, condition): - return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple} - for current_condition_tuple in condition[self.conditiontuples]], - self.conditionsetid: condition[self.conditionsetid]} + return { + "Updates": [ + {"Action": "DELETE", self.conditiontuple: current_condition_tuple} + for current_condition_tuple in condition[self.conditiontuples] + ], + self.conditionsetid: condition[self.conditionsetid], + } @AWSRetry.exponential_backoff() def list_regex_patterns_with_backoff(self, **params): @@ -502,60 +508,77 @@ def list_regex_patterns(self): try: response = self.list_regex_patterns_with_backoff(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not list regex patterns') - regex_patterns.extend(response['RegexPatternSets']) - if 'NextMarker' in response: - params['NextMarker'] = response['NextMarker'] + self.module.fail_json_aws(e, msg="Could not list regex patterns") + regex_patterns.extend(response["RegexPatternSets"]) + if "NextMarker" in response: + params["NextMarker"] = response["NextMarker"] else: break return regex_patterns def get_regex_pattern_by_name(self, name): existing_regex_patterns = self.list_regex_patterns() - regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns) + regex_lookup = dict((item["Name"], item["RegexPatternSetId"]) for item in existing_regex_patterns) if name in regex_lookup: - return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet'] + return self.get_regex_pattern_set_with_backoff(regex_lookup[name])["RegexPatternSet"] else: return None def ensure_regex_pattern_present(self, regex_pattern): - name = regex_pattern['name'] + name = regex_pattern["name"] pattern_set = self.get_regex_pattern_by_name(name) if not pattern_set: - pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name}, - self.client.create_regex_pattern_set)['RegexPatternSet'] - missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings']) - extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings']) + pattern_set = run_func_with_change_token_backoff( + self.client, self.module, {"Name": name}, self.client.create_regex_pattern_set + )["RegexPatternSet"] + missing = set(regex_pattern["regex_strings"]) - set(pattern_set["RegexPatternStrings"]) + extra = set(pattern_set["RegexPatternStrings"]) - set(regex_pattern["regex_strings"]) if not missing and not extra: return pattern_set - updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing] - updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra]) - run_func_with_change_token_backoff(self.client, self.module, - {'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates}, - self.client.update_regex_pattern_set, wait=True) - return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet'] + updates = [{"Action": "INSERT", "RegexPatternString": pattern} for pattern in missing] + updates.extend([{"Action": "DELETE", "RegexPatternString": pattern} for pattern in extra]) + run_func_with_change_token_backoff( + self.client, + self.module, + {"RegexPatternSetId": pattern_set["RegexPatternSetId"], "Updates": updates}, + self.client.update_regex_pattern_set, + wait=True, + ) + return self.get_regex_pattern_set_with_backoff(pattern_set["RegexPatternSetId"])["RegexPatternSet"] def delete_unused_regex_pattern(self, regex_pattern_set_id): try: - regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet'] + regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)[ + "RegexPatternSet" + ] updates = list() - for regex_pattern_string in regex_pattern_set['RegexPatternStrings']: - updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string}) - run_func_with_change_token_backoff(self.client, self.module, - {'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates}, - self.client.update_regex_pattern_set) - - run_func_with_change_token_backoff(self.client, self.module, - {'RegexPatternSetId': regex_pattern_set_id}, - self.client.delete_regex_pattern_set, wait=True) - except is_boto3_error_code('WAFNonexistentItemException'): + for regex_pattern_string in regex_pattern_set["RegexPatternStrings"]: + updates.append({"Action": "DELETE", "RegexPatternString": regex_pattern_string}) + run_func_with_change_token_backoff( + self.client, + self.module, + {"RegexPatternSetId": regex_pattern_set_id, "Updates": updates}, + self.client.update_regex_pattern_set, + ) + + run_func_with_change_token_backoff( + self.client, + self.module, + {"RegexPatternSetId": regex_pattern_set_id}, + self.client.delete_regex_pattern_set, + wait=True, + ) + except is_boto3_error_code("WAFNonexistentItemException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg='Could not delete regex pattern') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Could not delete regex pattern") def get_condition_by_name(self, name): - all_conditions = [d for d in self.list_conditions() if d['Name'] == name] + all_conditions = [d for d in self.list_conditions() if d["Name"] == name] if all_conditions: return all_conditions[0][self.conditionsetid] @@ -563,17 +586,17 @@ def get_condition_by_name(self, name): def get_condition_by_id_with_backoff(self, condition_set_id): params = dict() params[self.conditionsetid] = condition_set_id - func = getattr(self.client, 'get_' + self.method_suffix) + func = getattr(self.client, "get_" + self.method_suffix) return func(**params)[self.conditionset] def get_condition_by_id(self, condition_set_id): try: return self.get_condition_by_id_with_backoff(condition_set_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not get condition') + self.module.fail_json_aws(e, msg="Could not get condition") def list_conditions(self): - method = 'list_' + self.method_suffix + 's' + method = "list_" + self.method_suffix + "s" try: paginator = self.client.get_paginator(method) func = paginator.paginate().build_full_result @@ -583,66 +606,68 @@ def list_conditions(self): try: return func()[self.conditionsets] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type) + self.module.fail_json_aws(e, msg="Could not list %s conditions" % self.type) def tidy_up_regex_patterns(self, regex_match_set): all_regex_match_sets = self.list_conditions() all_match_set_patterns = list() for rms in all_regex_match_sets: - all_match_set_patterns.extend(conditiontuple['RegexPatternSetId'] - for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples]) + all_match_set_patterns.extend( + conditiontuple["RegexPatternSetId"] + for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples] + ) for filtr in regex_match_set[self.conditiontuples]: - if filtr['RegexPatternSetId'] not in all_match_set_patterns: - self.delete_unused_regex_pattern(filtr['RegexPatternSetId']) + if filtr["RegexPatternSetId"] not in all_match_set_patterns: + self.delete_unused_regex_pattern(filtr["RegexPatternSetId"]) def find_condition_in_rules(self, condition_set_id): rules_in_use = [] try: - if self.client.__class__.__name__ == 'WAF': + if self.client.__class__.__name__ == "WAF": all_rules = list_rules_with_backoff(self.client) - elif self.client.__class__.__name__ == 'WAFRegional': + elif self.client.__class__.__name__ == "WAFRegional": all_rules = list_regional_rules_with_backoff(self.client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not list rules') + self.module.fail_json_aws(e, msg="Could not list rules") for rule in all_rules: try: - rule_details = get_rule_with_backoff(self.client, rule['RuleId']) + rule_details = get_rule_with_backoff(self.client, rule["RuleId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not get rule details') - if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]: - rules_in_use.append(rule_details['Name']) + self.module.fail_json_aws(e, msg="Could not get rule details") + if condition_set_id in [predicate["DataId"] for predicate in rule_details["Predicates"]]: + rules_in_use.append(rule_details["Name"]) return rules_in_use def find_and_delete_condition(self, condition_set_id): current_condition = self.get_condition_by_id(condition_set_id) in_use_rules = self.find_condition_in_rules(condition_set_id) if in_use_rules: - rulenames = ', '.join(in_use_rules) - self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames)) + rulenames = ", ".join(in_use_rules) + self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition["Name"], rulenames)) if current_condition[self.conditiontuples]: # Filters are deleted using update with the DELETE action - func = getattr(self.client, 'update_' + self.method_suffix) + func = getattr(self.client, "update_" + self.method_suffix) params = self.format_for_deletion(current_condition) try: # We do not need to wait for the conditiontuple delete because we wait later for the delete_* call run_func_with_change_token_backoff(self.client, self.module, params, func) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not delete filters from condition') - func = getattr(self.client, 'delete_' + self.method_suffix) + self.module.fail_json_aws(e, msg="Could not delete filters from condition") + func = getattr(self.client, "delete_" + self.method_suffix) params = dict() params[self.conditionsetid] = condition_set_id try: run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not delete condition') + self.module.fail_json_aws(e, msg="Could not delete condition") # tidy up regex patterns - if self.type == 'regex': + if self.type == "regex": self.tidy_up_regex_patterns(current_condition) return True, {} def find_missing(self, update, current_condition): missing = [] - for desired in update['Updates']: + for desired in update["Updates"]: found = False desired_condition = desired[self.conditiontuple] current_conditions = current_condition[self.conditiontuples] @@ -657,39 +682,41 @@ def find_and_update_condition(self, condition_set_id): current_condition = self.get_condition_by_id(condition_set_id) update = self.format_for_update(condition_set_id) missing = self.find_missing(update, current_condition) - if self.module.params.get('purge_filters'): - extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple} - for current_tuple in current_condition[self.conditiontuples] - if current_tuple not in [desired[self.conditiontuple] for desired in update['Updates']]] + if self.module.params.get("purge_filters"): + extra = [ + {"Action": "DELETE", self.conditiontuple: current_tuple} + for current_tuple in current_condition[self.conditiontuples] + if current_tuple not in [desired[self.conditiontuple] for desired in update["Updates"]] + ] else: extra = [] changed = bool(missing or extra) if changed: - update['Updates'] = missing + extra - func = getattr(self.client, 'update_' + self.method_suffix) + update["Updates"] = missing + extra + func = getattr(self.client, "update_" + self.method_suffix) try: result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not update condition') + self.module.fail_json_aws(e, msg="Could not update condition") return changed, self.get_condition_by_id(condition_set_id) def ensure_condition_present(self): - name = self.module.params['name'] + name = self.module.params["name"] condition_set_id = self.get_condition_by_name(name) if condition_set_id: return self.find_and_update_condition(condition_set_id) else: params = dict() - params['Name'] = name - func = getattr(self.client, 'create_' + self.method_suffix) + params["Name"] = name + func = getattr(self.client, "create_" + self.method_suffix) try: condition = run_func_with_change_token_backoff(self.client, self.module, params, func) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not create condition') + self.module.fail_json_aws(e, msg="Could not create condition") return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid]) def ensure_condition_absent(self): - condition_set_id = self.get_condition_by_name(self.module.params['name']) + condition_set_id = self.get_condition_by_name(self.module.params["name"]) if condition_set_id: return self.find_and_delete_condition(condition_set_id) return False, {} @@ -698,45 +725,46 @@ def ensure_condition_absent(self): def main(): filters_subspec = dict( country=dict(), - field_to_match=dict(choices=['uri', 'query_string', 'header', 'method', 'body']), + field_to_match=dict(choices=["uri", "query_string", "header", "method", "body"]), header=dict(), - transformation=dict(choices=['none', 'compress_white_space', - 'html_entity_decode', 'lowercase', - 'cmd_line', 'url_decode']), - position=dict(choices=['exactly', 'starts_with', 'ends_with', - 'contains', 'contains_word']), - comparison=dict(choices=['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']), + transformation=dict( + choices=["none", "compress_white_space", "html_entity_decode", "lowercase", "cmd_line", "url_decode"] + ), + position=dict(choices=["exactly", "starts_with", "ends_with", "contains", "contains_word"]), + comparison=dict(choices=["EQ", "NE", "LE", "LT", "GE", "GT"]), target_string=dict(), # Bytes - size=dict(type='int'), + size=dict(type="int"), ip_address=dict(), regex_pattern=dict(), ) argument_spec = dict( name=dict(required=True), - type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']), - filters=dict(type='list', elements='dict'), - purge_filters=dict(type='bool', default=False), - waf_regional=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), + type=dict(required=True, choices=["byte", "geo", "ip", "regex", "size", "sql", "xss"]), + filters=dict(type="list", elements="dict"), + purge_filters=dict(type="bool", default=False), + waf_regional=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[["state", "present", ["filters"]]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['filters']]]) - state = module.params.get('state') + state = module.params.get("state") - resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + resource = "waf" if not module.params["waf_regional"] else "waf-regional" client = module.client(resource) condition = Condition(client, module) - if state == 'present': + if state == "present": (changed, results) = condition.ensure_condition_present() # return a condition agnostic ID for use by waf_rule - results['ConditionId'] = results[condition.conditionsetid] + results["ConditionId"] = results[condition.conditionsetid] else: (changed, results) = condition.ensure_condition_absent() module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/waf_info.py b/plugins/modules/waf_info.py index 37a8c2bd025..ea294c92ed4 100644 --- a/plugins/modules/waf_info.py +++ b/plugins/modules/waf_info.py @@ -123,22 +123,20 @@ def main(): argument_spec = dict( name=dict(required=False), - waf_regional=dict(type='bool', default=False) + waf_regional=dict(type="bool", default=False), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + resource = "waf" if not module.params["waf_regional"] else "waf-regional" client = module.client(resource) web_acls = list_web_acls(client, module) - name = module.params['name'] + name = module.params["name"] if name: - web_acls = [web_acl for web_acl in web_acls if - web_acl['Name'] == name] + web_acls = [web_acl for web_acl in web_acls if web_acl["Name"] == name] if not web_acls: module.fail_json(msg="WAF named %s not found" % name) - module.exit_json(wafs=[get_web_acl(client, module, web_acl['WebACLId']) - for web_acl in web_acls]) + module.exit_json(wafs=[get_web_acl(client, module, web_acl["WebACLId"]) for web_acl in web_acls]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/waf_rule.py b/plugins/modules/waf_rule.py index 54129dafc12..98064dd8ca4 100644 --- a/plugins/modules/waf_rule.py +++ b/plugins/modules/waf_rule.py @@ -157,48 +157,48 @@ def get_rule_by_name(client, module, name): - rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name] + rules = [d["RuleId"] for d in list_rules(client, module) if d["Name"] == name] if rules: return rules[0] def get_rule(client, module, rule_id): try: - return client.get_rule(RuleId=rule_id)['Rule'] + return client.get_rule(RuleId=rule_id)["Rule"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get WAF rule') + module.fail_json_aws(e, msg="Could not get WAF rule") def list_rules(client, module): - if client.__class__.__name__ == 'WAF': + if client.__class__.__name__ == "WAF": try: return list_rules_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list WAF rules') - elif client.__class__.__name__ == 'WAFRegional': + module.fail_json_aws(e, msg="Could not list WAF rules") + elif client.__class__.__name__ == "WAFRegional": try: return list_regional_rules_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list WAF Regional rules') + module.fail_json_aws(e, msg="Could not list WAF Regional rules") def list_regional_rules(client, module): try: return list_regional_rules_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list WAF rules') + module.fail_json_aws(e, msg="Could not list WAF rules") def find_and_update_rule(client, module, rule_id): rule = get_rule(client, module, rule_id) - rule_id = rule['RuleId'] + rule_id = rule["RuleId"] existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) all_conditions = dict() for condition_type in MATCH_LOOKUP: - method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's' + method = "list_" + MATCH_LOOKUP[condition_type]["method"] + "s" all_conditions[condition_type] = dict() try: paginator = client.get_paginator(method) @@ -208,125 +208,133 @@ def find_and_update_rule(client, module, rule_id): # and throw different exceptions func = getattr(client, method) try: - pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's'] + pred_results = func()[MATCH_LOOKUP[condition_type]["conditionset"] + "s"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type) + module.fail_json_aws(e, msg="Could not list %s conditions" % condition_type) for pred in pred_results: - pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id'] - all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred) - all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred) + pred["DataId"] = pred[MATCH_LOOKUP[condition_type]["conditionset"] + "Id"] + all_conditions[condition_type][pred["Name"]] = camel_dict_to_snake_dict(pred) + all_conditions[condition_type][pred["DataId"]] = camel_dict_to_snake_dict(pred) - for condition in module.params['conditions']: - desired_conditions[condition['type']][condition['name']] = condition + for condition in module.params["conditions"]: + desired_conditions[condition["type"]][condition["name"]] = condition - reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items()) - for condition in rule['Predicates']: - existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition) + reverse_condition_types = dict((v["type"], k) for (k, v) in MATCH_LOOKUP.items()) + for condition in rule["Predicates"]: + existing_conditions[reverse_condition_types[condition["Type"]]][condition["DataId"]] = camel_dict_to_snake_dict( + condition + ) insertions = list() deletions = list() for condition_type in desired_conditions: - for (condition_name, condition) in desired_conditions[condition_type].items(): + for condition_name, condition in desired_conditions[condition_type].items(): if condition_name not in all_conditions[condition_type]: module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type)) - condition['data_id'] = all_conditions[condition_type][condition_name]['data_id'] - if condition['data_id'] not in existing_conditions[condition_type]: + condition["data_id"] = all_conditions[condition_type][condition_name]["data_id"] + if condition["data_id"] not in existing_conditions[condition_type]: insertions.append(format_for_insertion(condition)) - if module.params['purge_conditions']: + if module.params["purge_conditions"]: for condition_type in existing_conditions: - deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values() - if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]]) + deletions.extend( + [ + format_for_deletion(condition) + for condition in existing_conditions[condition_type].values() + if not all_conditions[condition_type][condition["data_id"]]["name"] + in desired_conditions[condition_type] + ] + ) changed = bool(insertions or deletions) - update = { - 'RuleId': rule_id, - 'Updates': insertions + deletions - } + update = {"RuleId": rule_id, "Updates": insertions + deletions} if changed: try: run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not update rule conditions') + module.fail_json_aws(e, msg="Could not update rule conditions") return changed, get_rule(client, module, rule_id) def format_for_insertion(condition): - return dict(Action='INSERT', - Predicate=dict(Negated=condition['negated'], - Type=MATCH_LOOKUP[condition['type']]['type'], - DataId=condition['data_id'])) + return dict( + Action="INSERT", + Predicate=dict( + Negated=condition["negated"], Type=MATCH_LOOKUP[condition["type"]]["type"], DataId=condition["data_id"] + ), + ) def format_for_deletion(condition): - return dict(Action='DELETE', - Predicate=dict(Negated=condition['negated'], - Type=condition['type'], - DataId=condition['data_id'])) + return dict( + Action="DELETE", + Predicate=dict(Negated=condition["negated"], Type=condition["type"], DataId=condition["data_id"]), + ) def remove_rule_conditions(client, module, rule_id): - conditions = get_rule(client, module, rule_id)['Predicates'] + conditions = get_rule(client, module, rule_id)["Predicates"] updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions] try: - run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule) + run_func_with_change_token_backoff(client, module, {"RuleId": rule_id, "Updates": updates}, client.update_rule) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not remove rule conditions') + module.fail_json_aws(e, msg="Could not remove rule conditions") def ensure_rule_present(client, module): - name = module.params['name'] + name = module.params["name"] rule_id = get_rule_by_name(client, module, name) params = dict() if rule_id: return find_and_update_rule(client, module, rule_id) else: - params['Name'] = module.params['name'] - metric_name = module.params['metric_name'] + params["Name"] = module.params["name"] + metric_name = module.params["metric_name"] if not metric_name: - metric_name = re.sub(r'[^a-zA-Z0-9]', '', module.params['name']) - params['MetricName'] = metric_name + metric_name = re.sub(r"[^a-zA-Z0-9]", "", module.params["name"]) + params["MetricName"] = metric_name try: - new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule'] + new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)["Rule"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not create rule') - return find_and_update_rule(client, module, new_rule['RuleId']) + module.fail_json_aws(e, msg="Could not create rule") + return find_and_update_rule(client, module, new_rule["RuleId"]) def find_rule_in_web_acls(client, module, rule_id): web_acls_in_use = [] try: - if client.__class__.__name__ == 'WAF': + if client.__class__.__name__ == "WAF": all_web_acls = list_web_acls_with_backoff(client) - elif client.__class__.__name__ == 'WAFRegional': + elif client.__class__.__name__ == "WAFRegional": all_web_acls = list_regional_web_acls_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list Web ACLs') + module.fail_json_aws(e, msg="Could not list Web ACLs") for web_acl in all_web_acls: try: - web_acl_details = get_web_acl_with_backoff(client, web_acl['WebACLId']) + web_acl_details = get_web_acl_with_backoff(client, web_acl["WebACLId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get Web ACL details') - if rule_id in [rule['RuleId'] for rule in web_acl_details['Rules']]: - web_acls_in_use.append(web_acl_details['Name']) + module.fail_json_aws(e, msg="Could not get Web ACL details") + if rule_id in [rule["RuleId"] for rule in web_acl_details["Rules"]]: + web_acls_in_use.append(web_acl_details["Name"]) return web_acls_in_use def ensure_rule_absent(client, module): - rule_id = get_rule_by_name(client, module, module.params['name']) + rule_id = get_rule_by_name(client, module, module.params["name"]) in_use_web_acls = find_rule_in_web_acls(client, module, rule_id) if in_use_web_acls: - web_acl_names = ', '.join(in_use_web_acls) - module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" % - (module.params['name'], web_acl_names)) + web_acl_names = ", ".join(in_use_web_acls) + module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" % (module.params["name"], web_acl_names)) if rule_id: remove_rule_conditions(client, module, rule_id) try: - return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True) + return True, run_func_with_change_token_backoff( + client, module, {"RuleId": rule_id}, client.delete_rule, wait=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not delete rule') + module.fail_json_aws(e, msg="Could not delete rule") return False, {} @@ -334,17 +342,17 @@ def main(): argument_spec = dict( name=dict(required=True), metric_name=dict(), - state=dict(default='present', choices=['present', 'absent']), - conditions=dict(type='list', elements='dict'), - purge_conditions=dict(type='bool', default=False), - waf_regional=dict(type='bool', default=False), + state=dict(default="present", choices=["present", "absent"]), + conditions=dict(type="list", elements="dict"), + purge_conditions=dict(type="bool", default=False), + waf_regional=dict(type="bool", default=False), ) module = AnsibleAWSModule(argument_spec=argument_spec) - state = module.params.get('state') + state = module.params.get("state") - resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + resource = "waf" if not module.params["waf_regional"] else "waf-regional" client = module.client(resource) - if state == 'present': + if state == "present": (changed, results) = ensure_rule_present(client, module) else: (changed, results) = ensure_rule_absent(client, module) @@ -352,5 +360,5 @@ def main(): module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/waf_web_acl.py b/plugins/modules/waf_web_acl.py index dc35308e833..4b71231aec9 100644 --- a/plugins/modules/waf_web_acl.py +++ b/plugins/modules/waf_web_acl.py @@ -179,7 +179,7 @@ def get_web_acl_by_name(client, module, name): - acls = [d['WebACLId'] for d in list_web_acls(client, module) if d['Name'] == name] + acls = [d["WebACLId"] for d in list_web_acls(client, module) if d["Name"] == name] if acls: return acls[0] else: @@ -187,91 +187,93 @@ def get_web_acl_by_name(client, module, name): def create_rule_lookup(client, module): - if client.__class__.__name__ == 'WAF': + if client.__class__.__name__ == "WAF": try: rules = list_rules_with_backoff(client) - return dict((rule['Name'], rule) for rule in rules) + return dict((rule["Name"], rule) for rule in rules) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list rules') - elif client.__class__.__name__ == 'WAFRegional': + module.fail_json_aws(e, msg="Could not list rules") + elif client.__class__.__name__ == "WAFRegional": try: rules = list_regional_rules_with_backoff(client) - return dict((rule['Name'], rule) for rule in rules) + return dict((rule["Name"], rule) for rule in rules) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list regional rules') + module.fail_json_aws(e, msg="Could not list regional rules") def get_web_acl(client, module, web_acl_id): try: - return client.get_web_acl(WebACLId=web_acl_id)['WebACL'] + return client.get_web_acl(WebACLId=web_acl_id)["WebACL"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get Web ACL with id %s' % web_acl_id) + module.fail_json_aws(e, msg="Could not get Web ACL with id %s" % web_acl_id) -def list_web_acls(client, module,): - if client.__class__.__name__ == 'WAF': +def list_web_acls( + client, + module, +): + if client.__class__.__name__ == "WAF": try: return list_web_acls_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get Web ACLs') - elif client.__class__.__name__ == 'WAFRegional': + module.fail_json_aws(e, msg="Could not get Web ACLs") + elif client.__class__.__name__ == "WAFRegional": try: return list_regional_web_acls_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get Web ACLs') + module.fail_json_aws(e, msg="Could not get Web ACLs") def find_and_update_web_acl(client, module, web_acl_id): acl = get_web_acl(client, module, web_acl_id) rule_lookup = create_rule_lookup(client, module) - existing_rules = acl['Rules'] - desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'], - 'Priority': rule['priority'], - 'Action': {'Type': rule['action'].upper()}, - 'Type': rule.get('type', 'regular').upper()} - for rule in module.params['rules']] + existing_rules = acl["Rules"] + desired_rules = [ + { + "RuleId": rule_lookup[rule["name"]]["RuleId"], + "Priority": rule["priority"], + "Action": {"Type": rule["action"].upper()}, + "Type": rule.get("type", "regular").upper(), + } + for rule in module.params["rules"] + ] missing = [rule for rule in desired_rules if rule not in existing_rules] extras = [] - if module.params['purge_rules']: + if module.params["purge_rules"]: extras = [rule for rule in existing_rules if rule not in desired_rules] - insertions = [format_for_update(rule, 'INSERT') for rule in missing] - deletions = [format_for_update(rule, 'DELETE') for rule in extras] + insertions = [format_for_update(rule, "INSERT") for rule in missing] + deletions = [format_for_update(rule, "DELETE") for rule in extras] changed = bool(insertions + deletions) # Purge rules before adding new ones in case a deletion shares the same # priority as an insertion. - params = { - 'WebACLId': acl['WebACLId'], - 'DefaultAction': acl['DefaultAction'] - } + params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"]} change_tokens = [] if deletions: try: - params['Updates'] = deletions + params["Updates"] = deletions result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl) - change_tokens.append(result['ChangeToken']) + change_tokens.append(result["ChangeToken"]) get_waiter( - client, 'change_token_in_sync', - ).wait( - ChangeToken=result['ChangeToken'] - ) + client, + "change_token_in_sync", + ).wait(ChangeToken=result["ChangeToken"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not update Web ACL') + module.fail_json_aws(e, msg="Could not update Web ACL") if insertions: try: - params['Updates'] = insertions + params["Updates"] = insertions result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl) - change_tokens.append(result['ChangeToken']) + change_tokens.append(result["ChangeToken"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not update Web ACL') + module.fail_json_aws(e, msg="Could not update Web ACL") if change_tokens: for token in change_tokens: get_waiter( - client, 'change_token_in_sync', - ).wait( - ChangeToken=token - ) + client, + "change_token_in_sync", + ).wait(ChangeToken=token) if changed: acl = get_web_acl(client, module, web_acl_id) return changed, acl @@ -281,77 +283,79 @@ def format_for_update(rule, action): return dict( Action=action, ActivatedRule=dict( - Priority=rule['Priority'], - RuleId=rule['RuleId'], - Action=dict( - Type=rule['Action']['Type'] - ) - ) + Priority=rule["Priority"], + RuleId=rule["RuleId"], + Action=dict(Type=rule["Action"]["Type"]), + ), ) def remove_rules_from_web_acl(client, module, web_acl_id): acl = get_web_acl(client, module, web_acl_id) - deletions = [format_for_update(rule, 'DELETE') for rule in acl['Rules']] + deletions = [format_for_update(rule, "DELETE") for rule in acl["Rules"]] try: - params = {'WebACLId': acl['WebACLId'], 'DefaultAction': acl['DefaultAction'], 'Updates': deletions} + params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"], "Updates": deletions} run_func_with_change_token_backoff(client, module, params, client.update_web_acl) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not remove rule') + module.fail_json_aws(e, msg="Could not remove rule") def ensure_web_acl_present(client, module): changed = False result = None - name = module.params['name'] + name = module.params["name"] web_acl_id = get_web_acl_by_name(client, module, name) if web_acl_id: (changed, result) = find_and_update_web_acl(client, module, web_acl_id) else: - metric_name = module.params['metric_name'] + metric_name = module.params["metric_name"] if not metric_name: - metric_name = re.sub(r'[^A-Za-z0-9]', '', module.params['name']) - default_action = module.params['default_action'].upper() + metric_name = re.sub(r"[^A-Za-z0-9]", "", module.params["name"]) + default_action = module.params["default_action"].upper() try: - params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': default_action}} + params = {"Name": name, "MetricName": metric_name, "DefaultAction": {"Type": default_action}} new_web_acl = run_func_with_change_token_backoff(client, module, params, client.create_web_acl) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not create Web ACL') - (changed, result) = find_and_update_web_acl(client, module, new_web_acl['WebACL']['WebACLId']) + module.fail_json_aws(e, msg="Could not create Web ACL") + (changed, result) = find_and_update_web_acl(client, module, new_web_acl["WebACL"]["WebACLId"]) return changed, result def ensure_web_acl_absent(client, module): - web_acl_id = get_web_acl_by_name(client, module, module.params['name']) + web_acl_id = get_web_acl_by_name(client, module, module.params["name"]) if web_acl_id: web_acl = get_web_acl(client, module, web_acl_id) - if web_acl['Rules']: + if web_acl["Rules"]: remove_rules_from_web_acl(client, module, web_acl_id) try: - run_func_with_change_token_backoff(client, module, {'WebACLId': web_acl_id}, client.delete_web_acl, wait=True) + run_func_with_change_token_backoff( + client, module, {"WebACLId": web_acl_id}, client.delete_web_acl, wait=True + ) return True, {} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not delete Web ACL') + module.fail_json_aws(e, msg="Could not delete Web ACL") return False, {} def main(): argument_spec = dict( name=dict(required=True), - default_action=dict(choices=['block', 'allow', 'count']), + default_action=dict(choices=["block", "allow", "count"]), metric_name=dict(), - state=dict(default='present', choices=['present', 'absent']), - rules=dict(type='list', elements='dict'), - purge_rules=dict(type='bool', default=False), - waf_regional=dict(type='bool', default=False) + state=dict(default="present", choices=["present", "absent"]), + rules=dict(type="list", elements="dict"), + purge_rules=dict(type="bool", default=False), + waf_regional=dict(type="bool", default=False), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[["state", "present", ["default_action", "rules"]]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['default_action', 'rules']]]) - state = module.params.get('state') + state = module.params.get("state") - resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + resource = "waf" if not module.params["waf_regional"] else "waf-regional" client = module.client(resource) - if state == 'present': + if state == "present": (changed, results) = ensure_web_acl_present(client, module) else: (changed, results) = ensure_web_acl_absent(client, module) @@ -359,5 +363,5 @@ def main(): module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/wafv2_ip_set.py b/plugins/modules/wafv2_ip_set.py index 961c9325b31..b96ba0cb1c1 100644 --- a/plugins/modules/wafv2_ip_set.py +++ b/plugins/modules/wafv2_ip_set.py @@ -138,41 +138,36 @@ def __init__(self, wafv2, name, scope, fail_json_aws): self.existing_set, self.id, self.locktoken, self.arn = self.get_set() def description(self): - return self.existing_set.get('Description') + return self.existing_set.get("Description") def _format_set(self, ip_set): if ip_set is None: return None - return camel_dict_to_snake_dict(self.existing_set, ignore_list=['tags']) + return camel_dict_to_snake_dict(self.existing_set, ignore_list=["tags"]) def get(self): return self._format_set(self.existing_set) def remove(self): try: - response = self.wafv2.delete_ip_set( - Name=self.name, - Scope=self.scope, - Id=self.id, - LockToken=self.locktoken - ) + response = self.wafv2.delete_ip_set(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to remove wafv2 ip set.") return {} def create(self, description, ip_address_version, addresses, tags): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'IPAddressVersion': ip_address_version, - 'Addresses': addresses, + "Name": self.name, + "Scope": self.scope, + "IPAddressVersion": ip_address_version, + "Addresses": addresses, } if description: - req_obj['Description'] = description + req_obj["Description"] = description if tags: - req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) + req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) try: response = self.wafv2.create_ip_set(**req_obj) @@ -184,15 +179,15 @@ def create(self, description, ip_address_version, addresses, tags): def update(self, description, addresses): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'Id': self.id, - 'Addresses': addresses, - 'LockToken': self.locktoken + "Name": self.name, + "Scope": self.scope, + "Id": self.id, + "Addresses": addresses, + "LockToken": self.locktoken, } if description: - req_obj['Description'] = description + req_obj["Description"] = description try: response = self.wafv2.update_ip_set(**req_obj) @@ -208,38 +203,31 @@ def get_set(self): id = None arn = None locktoken = None - for item in response.get('IPSets'): - if item.get('Name') == self.name: - id = item.get('Id') - locktoken = item.get('LockToken') - arn = item.get('ARN') + for item in response.get("IPSets"): + if item.get("Name") == self.name: + id = item.get("Id") + locktoken = item.get("LockToken") + arn = item.get("ARN") if id: try: - existing_set = self.wafv2.get_ip_set( - Name=self.name, - Scope=self.scope, - Id=id - ).get('IPSet') + existing_set = self.wafv2.get_ip_set(Name=self.name, Scope=self.scope, Id=id).get("IPSet") except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 ip set.") tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws) - existing_set['tags'] = tags + existing_set["tags"] = tags return existing_set, id, locktoken, arn def list(self, Nextmarker=None): # there is currently no paginator for wafv2 - req_obj = { - 'Scope': self.scope, - 'Limit': 100 - } + req_obj = {"Scope": self.scope, "Limit": 100} if Nextmarker: - req_obj['NextMarker'] = Nextmarker + req_obj["NextMarker"] = Nextmarker try: response = self.wafv2.list_ip_sets(**req_obj) - if response.get('NextMarker'): - response['IPSets'] += self.list(Nextmarker=response.get('NextMarker')).get('IPSets') + if response.get("NextMarker"): + response["IPSets"] += self.list(Nextmarker=response.get("NextMarker")).get("IPSets") except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to list wafv2 ip set.") @@ -249,11 +237,11 @@ def list(self, Nextmarker=None): def compare(existing_set, addresses, purge_addresses, state): diff = False new_rules = [] - existing_rules = existing_set.get('addresses') - if state == 'present': + existing_rules = existing_set.get("addresses") + if state == "present": if purge_addresses: new_rules = addresses - if sorted(addresses) != sorted(existing_set.get('addresses')): + if sorted(addresses) != sorted(existing_set.get("addresses")): diff = True else: @@ -275,23 +263,22 @@ def compare(existing_set, addresses, purge_addresses, state): def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']), - description=dict(type='str'), - ip_address_version=dict(type='str', choices=['IPV4', 'IPV6']), - addresses=dict(type='list', elements='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - purge_addresses=dict(type='bool', default=True), + state=dict(type="str", required=True, choices=["present", "absent"]), + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), + description=dict(type="str"), + ip_address_version=dict(type="str", choices=["IPV4", "IPV6"]), + addresses=dict(type="list", elements="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + purge_addresses=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[['state', 'present', ['ip_address_version', 'addresses']]] + required_if=[["state", "present", ["ip_address_version", "addresses"]]], ) state = module.params.get("state") @@ -305,17 +292,18 @@ def main(): purge_addresses = module.params.get("purge_addresses") check_mode = module.check_mode - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") change = False retval = {} ip_set = IpSet(wafv2, name, scope, module.fail_json_aws) - if state == 'present': - + if state == "present": if ip_set.get(): - tags_updated = ensure_wafv2_tags(wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode) + tags_updated = ensure_wafv2_tags( + wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode + ) ips_updated, addresses = compare(ip_set.get(), addresses, purge_addresses, state) description_updated = bool(description) and ip_set.description() != description change = ips_updated or description_updated or tags_updated @@ -323,32 +311,23 @@ def main(): if module.check_mode: pass elif ips_updated or description_updated: - retval = ip_set.update( - description=description, - addresses=addresses - ) + retval = ip_set.update(description=description, addresses=addresses) elif tags_updated: retval, id, locktoken, arn = ip_set.get_set() else: if not check_mode: retval = ip_set.create( - description=description, - ip_address_version=ip_address_version, - addresses=addresses, - tags=tags + description=description, ip_address_version=ip_address_version, addresses=addresses, tags=tags ) change = True - if state == 'absent': + if state == "absent": if ip_set.get(): if addresses: if len(addresses) > 0: change, addresses = compare(ip_set.get(), addresses, purge_addresses, state) if change and not check_mode: - retval = ip_set.update( - description=description, - addresses=addresses - ) + retval = ip_set.update(description=description, addresses=addresses) else: if not check_mode: retval = ip_set.remove() @@ -357,5 +336,5 @@ def main(): module.exit_json(changed=change, **retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/wafv2_ip_set_info.py b/plugins/modules/wafv2_ip_set_info.py index 4e0d4feb538..caca5cd7081 100644 --- a/plugins/modules/wafv2_ip_set_info.py +++ b/plugins/modules/wafv2_ip_set_info.py @@ -83,17 +83,16 @@ def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None): # there is currently no paginator for wafv2 - req_obj = { - 'Scope': scope, - 'Limit': 100 - } + req_obj = {"Scope": scope, "Limit": 100} if Nextmarker: - req_obj['NextMarker'] = Nextmarker + req_obj["NextMarker"] = Nextmarker try: response = wafv2.list_ip_sets(**req_obj) - if response.get('NextMarker'): - response['IPSets'] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get('NextMarker')).get('IPSets') + if response.get("NextMarker"): + response["IPSets"] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get("NextMarker")).get( + "IPSets" + ) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to list wafv2 ip set") return response @@ -101,21 +100,15 @@ def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None): def get_ip_set(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_ip_set( - Name=name, - Scope=scope, - Id=id - ) + response = wafv2.get_ip_set(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 ip set") return response def main(): - arg_spec = dict( - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + name=dict(type="str", required=True), scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]) ) module = AnsibleAWSModule( @@ -126,26 +119,26 @@ def main(): name = module.params.get("name") scope = module.params.get("scope") - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if ip set exist response = list_ip_sets(wafv2, scope, module.fail_json_aws) id = None - for item in response.get('IPSets'): - if item.get('Name') == name: - id = item.get('Id') - arn = item.get('ARN') + for item in response.get("IPSets"): + if item.get("Name") == name: + id = item.get("Id") + arn = item.get("ARN") retval = {} existing_set = None if id: existing_set = get_ip_set(wafv2, name, scope, id, module.fail_json_aws) - retval = camel_dict_to_snake_dict(existing_set.get('IPSet')) - retval['tags'] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {} + retval = camel_dict_to_snake_dict(existing_set.get("IPSet")) + retval["tags"] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {} module.exit_json(**retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/wafv2_resources.py b/plugins/modules/wafv2_resources.py index 552a2de03bd..b36f517120b 100644 --- a/plugins/modules/wafv2_resources.py +++ b/plugins/modules/wafv2_resources.py @@ -73,11 +73,7 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_web_acl( - Name=name, - Scope=scope, - Id=id - ) + response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response @@ -85,9 +81,7 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws): def list_wafv2_resources(wafv2, arn, fail_json_aws): try: - response = wafv2.list_resources_for_web_acl( - WebACLArn=arn - ) + response = wafv2.list_resources_for_web_acl(WebACLArn=arn) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to list wafv2 web acl.") return response @@ -95,10 +89,7 @@ def list_wafv2_resources(wafv2, arn, fail_json_aws): def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws): try: - response = wafv2.associate_web_acl( - WebACLArn=waf_arn, - ResourceArn=arn - ) + response = wafv2.associate_web_acl(WebACLArn=waf_arn, ResourceArn=arn) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to add wafv2 web acl.") return response @@ -106,27 +97,24 @@ def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws): def remove_resources(wafv2, arn, fail_json_aws): try: - response = wafv2.disassociate_web_acl( - ResourceArn=arn - ) + response = wafv2.disassociate_web_acl(ResourceArn=arn) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to remove wafv2 web acl.") return response def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - name=dict(type='str'), - scope=dict(type='str', choices=['CLOUDFRONT', 'REGIONAL']), - arn=dict(type='str', required=True) + state=dict(type="str", required=True, choices=["present", "absent"]), + name=dict(type="str"), + scope=dict(type="str", choices=["CLOUDFRONT", "REGIONAL"]), + arn=dict(type="str", required=True), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[['state', 'present', ['name', 'scope']]] + required_if=[["state", "present", ["name", "scope"]]], ) state = module.params.get("state") @@ -135,7 +123,7 @@ def main(): arn = module.params.get("arn") check_mode = module.check_mode - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if web acl exists @@ -145,26 +133,26 @@ def main(): retval = {} change = False - for item in response.get('WebACLs'): - if item.get('Name') == name: - id = item.get('Id') + for item in response.get("WebACLs"): + if item.get("Name") == name: + id = item.get("Id") if id: existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) - waf_arn = existing_acl.get('WebACL').get('ARN') + waf_arn = existing_acl.get("WebACL").get("ARN") retval = list_wafv2_resources(wafv2, waf_arn, module.fail_json_aws) - if state == 'present': + if state == "present": if retval: - if arn not in retval.get('ResourceArns'): + if arn not in retval.get("ResourceArns"): change = True if not check_mode: retval = add_wafv2_resources(wafv2, waf_arn, arn, module.fail_json_aws) - elif state == 'absent': + elif state == "absent": if retval: - if arn in retval.get('ResourceArns'): + if arn in retval.get("ResourceArns"): change = True if not check_mode: retval = remove_resources(wafv2, arn, module.fail_json_aws) @@ -172,5 +160,5 @@ def main(): module.exit_json(changed=change, **camel_dict_to_snake_dict(retval)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/wafv2_resources_info.py b/plugins/modules/wafv2_resources_info.py index f9c5c3c08c2..5cafee1f67d 100644 --- a/plugins/modules/wafv2_resources_info.py +++ b/plugins/modules/wafv2_resources_info.py @@ -62,11 +62,7 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_web_acl( - Name=name, - Scope=scope, - Id=id - ) + response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response @@ -78,19 +74,16 @@ def list_web_acls(wafv2, scope, fail_json_aws): def list_wafv2_resources(wafv2, arn, fail_json_aws): try: - response = wafv2.list_resources_for_web_acl( - WebACLArn=arn - ) + response = wafv2.list_resources_for_web_acl(WebACLArn=arn) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to list wafv2 resources.") return response def main(): - arg_spec = dict( - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), ) module = AnsibleAWSModule( @@ -101,25 +94,25 @@ def main(): name = module.params.get("name") scope = module.params.get("scope") - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if web acl exists response = list_web_acls(wafv2, scope, module.fail_json_aws) id = None retval = {} - for item in response.get('WebACLs'): - if item.get('Name') == name: - id = item.get('Id') + for item in response.get("WebACLs"): + if item.get("Name") == name: + id = item.get("Id") if id: existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) - arn = existing_acl.get('WebACL').get('ARN') + arn = existing_acl.get("WebACL").get("ARN") retval = camel_dict_to_snake_dict(list_wafv2_resources(wafv2, arn, module.fail_json_aws)) module.exit_json(**retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/wafv2_rule_group.py b/plugins/modules/wafv2_rule_group.py index 60cc60a131f..e2a7fd1d438 100644 --- a/plugins/modules/wafv2_rule_group.py +++ b/plugins/modules/wafv2_rule_group.py @@ -227,20 +227,20 @@ def __init__(self, wafv2, name, scope, fail_json_aws): def update(self, description, rules, sampled_requests, cloudwatch_metrics, metric_name): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'Id': self.id, - 'Rules': rules, - 'LockToken': self.locktoken, - 'VisibilityConfig': { - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - } + "Name": self.name, + "Scope": self.scope, + "Id": self.id, + "Rules": rules, + "LockToken": self.locktoken, + "VisibilityConfig": { + "SampledRequestsEnabled": sampled_requests, + "CloudWatchMetricsEnabled": cloudwatch_metrics, + "MetricName": metric_name, + }, } if description: - req_obj['Description'] = description + req_obj["Description"] = description try: response = self.wafv2.update_rule_group(**req_obj) @@ -252,11 +252,11 @@ def get_group(self): if self.id is None: response = self.list() - for item in response.get('RuleGroups'): - if item.get('Name') == self.name: - self.id = item.get('Id') - self.locktoken = item.get('LockToken') - self.arn = item.get('ARN') + for item in response.get("RuleGroups"): + if item.get("Name") == self.name: + self.id = item.get("Id") + self.locktoken = item.get("LockToken") + self.arn = item.get("ARN") return self.refresh_group() @@ -264,18 +264,14 @@ def refresh_group(self): existing_group = None if self.id: try: - response = self.wafv2.get_rule_group( - Name=self.name, - Scope=self.scope, - Id=self.id - ) - existing_group = response.get('RuleGroup') - self.locktoken = response.get('LockToken') + response = self.wafv2.get_rule_group(Name=self.name, Scope=self.scope, Id=self.id) + existing_group = response.get("RuleGroup") + self.locktoken = response.get("LockToken") except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 rule group.") tags = describe_wafv2_tags(self.wafv2, self.arn, self.fail_json_aws) - existing_group['tags'] = tags or {} + existing_group["tags"] = tags or {} return existing_group @@ -290,10 +286,7 @@ def get(self): def remove(self): try: response = self.wafv2.delete_rule_group( - Name=self.name, - Scope=self.scope, - Id=self.id, - LockToken=self.locktoken + Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken ) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to delete wafv2 rule group.") @@ -301,22 +294,22 @@ def remove(self): def create(self, capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'Capacity': capacity, - 'Rules': rules, - 'VisibilityConfig': { - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - } + "Name": self.name, + "Scope": self.scope, + "Capacity": capacity, + "Rules": rules, + "VisibilityConfig": { + "SampledRequestsEnabled": sampled_requests, + "CloudWatchMetricsEnabled": cloudwatch_metrics, + "MetricName": metric_name, + }, } if description: - req_obj['Description'] = description + req_obj["Description"] = description if tags: - req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) + req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) try: response = self.wafv2.create_rule_group(**req_obj) @@ -329,26 +322,25 @@ def create(self, capacity, description, rules, sampled_requests, cloudwatch_metr def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']), - capacity=dict(type='int'), - description=dict(type='str'), - rules=dict(type='list', elements='dict'), - sampled_requests=dict(type='bool', default=False), - cloudwatch_metrics=dict(type='bool', default=True), - metric_name=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - purge_rules=dict(default=True, type='bool'), + state=dict(type="str", required=True, choices=["present", "absent"]), + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), + capacity=dict(type="int"), + description=dict(type="str"), + rules=dict(type="list", elements="dict"), + sampled_requests=dict(type="bool", default=False), + cloudwatch_metrics=dict(type="bool", default=True), + metric_name=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + purge_rules=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[['state', 'present', ['capacity', 'rules']]] + required_if=[["state", "present", ["capacity", "rules"]]], ) state = module.params.get("state") @@ -373,31 +365,26 @@ def main(): if not metric_name: metric_name = name - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") rule_group = RuleGroup(wafv2, name, scope, module.fail_json_aws) change = False retval = {} - if state == 'present': + if state == "present": if rule_group.get(): - tagging_change = ensure_wafv2_tags(wafv2, rule_group.arn, tags, purge_tags, - module.fail_json_aws, module.check_mode) - rules_change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state) - description_change = bool(description) and (rule_group.get().get('Description') != description) + tagging_change = ensure_wafv2_tags( + wafv2, rule_group.arn, tags, purge_tags, module.fail_json_aws, module.check_mode + ) + rules_change, rules = compare_priority_rules(rule_group.get().get("Rules"), rules, purge_rules, state) + description_change = bool(description) and (rule_group.get().get("Description") != description) change = tagging_change or rules_change or description_change retval = rule_group.get() if module.check_mode: # In check mode nothing changes... pass elif rules_change or description_change: - retval = rule_group.update( - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name - ) + retval = rule_group.update(description, rules, sampled_requests, cloudwatch_metrics, metric_name) elif tagging_change: retval = rule_group.refresh_group() @@ -405,35 +392,25 @@ def main(): change = True if not check_mode: retval = rule_group.create( - capacity, - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name, - tags + capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags ) - elif state == 'absent': + elif state == "absent": if rule_group.get(): if rules: if len(rules) > 0: - change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state) + change, rules = compare_priority_rules(rule_group.get().get("Rules"), rules, purge_rules, state) if change and not check_mode: retval = rule_group.update( - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name + description, rules, sampled_requests, cloudwatch_metrics, metric_name ) else: change = True if not check_mode: retval = rule_group.remove() - module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=['tags'])) + module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=["tags"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/wafv2_rule_group_info.py b/plugins/modules/wafv2_rule_group_info.py index c95b74f81c7..58862a9a5f2 100644 --- a/plugins/modules/wafv2_rule_group_info.py +++ b/plugins/modules/wafv2_rule_group_info.py @@ -101,11 +101,7 @@ def get_rule_group(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_rule_group( - Name=name, - Scope=scope, - Id=id - ) + response = wafv2.get_rule_group(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 rule group.") return response @@ -113,39 +109,39 @@ def get_rule_group(wafv2, name, scope, id, fail_json_aws): def main(): arg_spec = dict( - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), ) module = AnsibleAWSModule( argument_spec=arg_spec, - supports_check_mode=True + supports_check_mode=True, ) name = module.params.get("name") scope = module.params.get("scope") - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if rule group exists response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws) id = None retval = {} - for item in response.get('RuleGroups'): - if item.get('Name') == name: - id = item.get('Id') - arn = item.get('ARN') + for item in response.get("RuleGroups"): + if item.get("Name") == name: + id = item.get("Id") + arn = item.get("ARN") existing_group = None if id: existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws) - retval = camel_dict_to_snake_dict(existing_group.get('RuleGroup')) + retval = camel_dict_to_snake_dict(existing_group.get("RuleGroup")) tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) - retval['tags'] = tags or {} + retval["tags"] = tags or {} module.exit_json(**retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/wafv2_web_acl.py b/plugins/modules/wafv2_web_acl.py index 3b7fd8daa16..23e8f9c6b09 100644 --- a/plugins/modules/wafv2_web_acl.py +++ b/plugins/modules/wafv2_web_acl.py @@ -339,26 +339,35 @@ def __init__(self, wafv2, name, scope, fail_json_aws): self.fail_json_aws = fail_json_aws self.existing_acl, self.id, self.locktoken = self.get_web_acl() - def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name, custom_response_bodies): + def update( + self, + default_action, + description, + rules, + sampled_requests, + cloudwatch_metrics, + metric_name, + custom_response_bodies, + ): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'Id': self.id, - 'DefaultAction': default_action, - 'Rules': rules, - 'VisibilityConfig': { - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name + "Name": self.name, + "Scope": self.scope, + "Id": self.id, + "DefaultAction": default_action, + "Rules": rules, + "VisibilityConfig": { + "SampledRequestsEnabled": sampled_requests, + "CloudWatchMetricsEnabled": cloudwatch_metrics, + "MetricName": metric_name, }, - 'LockToken': self.locktoken + "LockToken": self.locktoken, } if description: - req_obj['Description'] = description + req_obj["Description"] = description if custom_response_bodies: - req_obj['CustomResponseBodies'] = custom_response_bodies + req_obj["CustomResponseBodies"] = custom_response_bodies try: response = self.wafv2.update_web_acl(**req_obj) @@ -370,12 +379,7 @@ def update(self, default_action, description, rules, sampled_requests, cloudwatc def remove(self): try: - response = self.wafv2.delete_web_acl( - Name=self.name, - Scope=self.scope, - Id=self.id, - LockToken=self.locktoken - ) + response = self.wafv2.delete_web_acl(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to remove wafv2 web acl.") return response @@ -392,47 +396,53 @@ def get_web_acl(self): existing_acl = None response = self.list() - for item in response.get('WebACLs'): - if item.get('Name') == self.name: - id = item.get('Id') - locktoken = item.get('LockToken') - arn = item.get('ARN') + for item in response.get("WebACLs"): + if item.get("Name") == self.name: + id = item.get("Id") + locktoken = item.get("LockToken") + arn = item.get("ARN") if id: try: - existing_acl = self.wafv2.get_web_acl( - Name=self.name, - Scope=self.scope, - Id=id - ) + existing_acl = self.wafv2.get_web_acl(Name=self.name, Scope=self.scope, Id=id) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 web acl.") tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws) - existing_acl['tags'] = tags + existing_acl["tags"] = tags return existing_acl, id, locktoken def list(self): return wafv2_list_web_acls(self.wafv2, self.scope, self.fail_json_aws) - def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description, custom_response_bodies): + def create( + self, + default_action, + rules, + sampled_requests, + cloudwatch_metrics, + metric_name, + tags, + description, + custom_response_bodies, + ): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'DefaultAction': default_action, - 'Rules': rules, - 'VisibilityConfig': { - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - } + "Name": self.name, + "Scope": self.scope, + "DefaultAction": default_action, + "Rules": rules, + "VisibilityConfig": { + "SampledRequestsEnabled": sampled_requests, + "CloudWatchMetricsEnabled": cloudwatch_metrics, + "MetricName": metric_name, + }, } if custom_response_bodies: - req_obj['CustomResponseBodies'] = custom_response_bodies + req_obj["CustomResponseBodies"] = custom_response_bodies if description: - req_obj['Description'] = description + req_obj["Description"] = description if tags: - req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) + req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) try: response = self.wafv2.create_web_acl(**req_obj) @@ -444,7 +454,6 @@ def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, me def format_result(result): - # We were returning details of the Web ACL inside a "web_acl" parameter on # creation, keep returning it to avoid breaking existing playbooks, but also # return what the docs said we return (and returned when no change happened) @@ -452,31 +461,30 @@ def format_result(result): if "WebACL" in retval: retval.update(retval["WebACL"]) - return camel_dict_to_snake_dict(retval, ignore_list=['tags']) + return camel_dict_to_snake_dict(retval, ignore_list=["tags"]) def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']), - description=dict(type='str'), - default_action=dict(type='str', choices=['Block', 'Allow']), - rules=dict(type='list', elements='dict'), - sampled_requests=dict(type='bool', default=False), - cloudwatch_metrics=dict(type='bool', default=True), - metric_name=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - custom_response_bodies=dict(type='dict'), - purge_rules=dict(default=True, type='bool'), + state=dict(type="str", required=True, choices=["present", "absent"]), + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), + description=dict(type="str"), + default_action=dict(type="str", choices=["Block", "Allow"]), + rules=dict(type="list", elements="dict"), + sampled_requests=dict(type="bool", default=False), + cloudwatch_metrics=dict(type="bool", default=True), + metric_name=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + custom_response_bodies=dict(type="dict"), + purge_rules=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[['state', 'present', ['default_action', 'rules']]] + required_if=[["state", "present", ["default_action", "rules"]]], ) state = module.params.get("state") @@ -495,16 +503,16 @@ def main(): custom_response_bodies = module.params.get("custom_response_bodies") if custom_response_bodies: - module.require_botocore_at_least('1.20.40', reason='to set custom response bodies') + module.require_botocore_at_least("1.20.40", reason="to set custom response bodies") custom_response_bodies = {} for custom_name, body in module.params.get("custom_response_bodies").items(): custom_response_bodies[custom_name] = snake_dict_to_camel_dict(body, capitalize_first=True) - if default_action == 'Block': - default_action = {'Block': {}} - elif default_action == 'Allow': - default_action = {'Allow': {}} + if default_action == "Block": + default_action = {"Block": {}} + elif default_action == "Allow": + default_action = {"Allow": {}} if rules: rules = [] @@ -514,17 +522,19 @@ def main(): if not metric_name: metric_name = name - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") web_acl = WebACL(wafv2, name, scope, module.fail_json_aws) change = False retval = {} - if state == 'present': + if state == "present": if web_acl.get(): - tags_changed = ensure_wafv2_tags(wafv2, web_acl.get().get('WebACL').get('ARN'), tags, purge_tags, module.fail_json_aws, module.check_mode) - change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state) - change = change or (description and web_acl.get().get('WebACL').get('Description') != description) - change = change or (default_action and web_acl.get().get('WebACL').get('DefaultAction') != default_action) + tags_changed = ensure_wafv2_tags( + wafv2, web_acl.get().get("WebACL").get("ARN"), tags, purge_tags, module.fail_json_aws, module.check_mode + ) + change, rules = compare_priority_rules(web_acl.get().get("WebACL").get("Rules"), rules, purge_rules, state) + change = change or (description and web_acl.get().get("WebACL").get("Description") != description) + change = change or (default_action and web_acl.get().get("WebACL").get("DefaultAction") != default_action) if change and not check_mode: retval = web_acl.update( @@ -534,7 +544,7 @@ def main(): sampled_requests, cloudwatch_metrics, metric_name, - custom_response_bodies + custom_response_bodies, ) elif tags_changed: retval, id, locktoken = web_acl.get_web_acl() @@ -554,14 +564,16 @@ def main(): metric_name, tags, description, - custom_response_bodies + custom_response_bodies, ) - elif state == 'absent': + elif state == "absent": if web_acl.get(): if rules: if len(rules) > 0: - change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state) + change, rules = compare_priority_rules( + web_acl.get().get("WebACL").get("Rules"), rules, purge_rules, state + ) if change and not check_mode: retval = web_acl.update( default_action, @@ -570,7 +582,7 @@ def main(): sampled_requests, cloudwatch_metrics, metric_name, - custom_response_bodies + custom_response_bodies, ) else: change = True @@ -580,5 +592,5 @@ def main(): module.exit_json(changed=change, **format_result(retval)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/wafv2_web_acl_info.py b/plugins/modules/wafv2_web_acl_info.py index 8fe00f66a1a..e3cdc46e330 100644 --- a/plugins/modules/wafv2_web_acl_info.py +++ b/plugins/modules/wafv2_web_acl_info.py @@ -103,21 +103,16 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_web_acl( - Name=name, - Scope=scope, - Id=id - ) + response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response def main(): - arg_spec = dict( - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), ) module = AnsibleAWSModule( @@ -129,7 +124,7 @@ def main(): name = module.params.get("name") scope = module.params.get("scope") - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if web acl exists response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws) @@ -137,19 +132,19 @@ def main(): arn = None retval = {} - for item in response.get('WebACLs'): - if item.get('Name') == name: - id = item.get('Id') - arn = item.get('ARN') + for item in response.get("WebACLs"): + if item.get("Name") == name: + id = item.get("Id") + arn = item.get("ARN") if id: existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) - retval = camel_dict_to_snake_dict(existing_acl.get('WebACL')) + retval = camel_dict_to_snake_dict(existing_acl.get("WebACL")) tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) - retval['tags'] = tags + retval["tags"] = tags module.exit_json(**retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tests/integration/targets/elb_target/files/ansible_lambda_target.py b/tests/integration/targets/elb_target/files/ansible_lambda_target.py index 3ea22472e91..2f10db49a95 100644 --- a/tests/integration/targets/elb_target/files/ansible_lambda_target.py +++ b/tests/integration/targets/elb_target/files/ansible_lambda_target.py @@ -1,10 +1,8 @@ -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import json def lambda_handler(event, context): - return { - 'statusCode': 200, - 'body': json.dumps('Hello from Lambda!') - } + return {"statusCode": 200, "body": json.dumps("Hello from Lambda!")} diff --git a/tests/integration/targets/glue_crawler/aliases b/tests/integration/targets/glue_crawler/aliases index 4ef4b2067d0..21fa9fd9829 100644 --- a/tests/integration/targets/glue_crawler/aliases +++ b/tests/integration/targets/glue_crawler/aliases @@ -1 +1,4 @@ cloud/aws + +disabled +# https://github.com/ansible-collections/community.aws/issues/1796 diff --git a/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py b/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py index d0d08dae90f..1710dbafdcc 100644 --- a/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py +++ b/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py @@ -1,13 +1,11 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import json def lambda_handler(event, context): - return { - 'statusCode': 200, - 'body': json.dumps('Hello from Lambda!') - } + return {"statusCode": 200, "body": json.dumps("Hello from Lambda!")} diff --git a/tests/integration/targets/s3_logging/aliases b/tests/integration/targets/s3_logging/aliases index 4ef4b2067d0..c3b9e5a23bc 100644 --- a/tests/integration/targets/s3_logging/aliases +++ b/tests/integration/targets/s3_logging/aliases @@ -1 +1,4 @@ cloud/aws + +# https://github.com/ansible-collections/community.aws/issues/1797 +disabled diff --git a/tests/integration/targets/s3_sync/aliases b/tests/integration/targets/s3_sync/aliases index 4ef4b2067d0..c3b9e5a23bc 100644 --- a/tests/integration/targets/s3_sync/aliases +++ b/tests/integration/targets/s3_sync/aliases @@ -1 +1,4 @@ cloud/aws + +# https://github.com/ansible-collections/community.aws/issues/1797 +disabled diff --git a/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py b/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py index ea2f51b0f4c..501a24d7e01 100644 --- a/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py +++ b/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py @@ -9,6 +9,7 @@ """ from __future__ import absolute_import, division, print_function + __metaclass__ = type import hashlib @@ -20,11 +21,11 @@ else: ssh_public_key = sys.argv[1] -with open(ssh_public_key, 'r') as key_fh: +with open(ssh_public_key, "r") as key_fh: data = key_fh.read() # Convert from SSH format to DER format -public_key = RSA.importKey(data).exportKey('DER') +public_key = RSA.importKey(data).exportKey("DER") md5digest = hashlib.md5(public_key).hexdigest() # Format the md5sum into the normal format pairs = zip(md5digest[::2], md5digest[1::2]) diff --git a/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py b/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py index 98f65783689..7288616460f 100644 --- a/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py +++ b/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py @@ -1,6 +1,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type diff --git a/tests/unit/compat/builtins.py b/tests/unit/compat/builtins.py index 349d310e86d..27bfe24507f 100644 --- a/tests/unit/compat/builtins.py +++ b/tests/unit/compat/builtins.py @@ -16,7 +16,8 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type # @@ -28,6 +29,6 @@ try: import __builtin__ # pylint: disable=unused-import except ImportError: - BUILTINS = 'builtins' + BUILTINS = "builtins" else: - BUILTINS = '__builtin__' + BUILTINS = "__builtin__" diff --git a/tests/unit/compat/mock.py b/tests/unit/compat/mock.py index 0972cd2e8e5..54c1d20e7dc 100644 --- a/tests/unit/compat/mock.py +++ b/tests/unit/compat/mock.py @@ -16,12 +16,13 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -''' +""" Compat module for Python3.x's unittest.mock module -''' +""" import sys # Python 2.7 @@ -40,7 +41,7 @@ try: from mock import * except ImportError: - print('You need the mock library installed on python2.x to run tests') + print("You need the mock library installed on python2.x to run tests") # Prior to 3.4.4, mock_open cannot handle binary read_data @@ -51,7 +52,7 @@ def _iterate_read_data(read_data): # Helper for mock_open: # Retrieve lines from read_data via a generator so that separate calls to # readline, read, and readlines are properly interleaved - sep = b'\n' if isinstance(read_data, bytes) else '\n' + sep = b"\n" if isinstance(read_data, bytes) else "\n" data_as_list = [l + sep for l in read_data.split(sep)] if data_as_list[-1] == sep: @@ -67,7 +68,7 @@ def _iterate_read_data(read_data): for line in data_as_list: yield line - def mock_open(mock=None, read_data=''): + def mock_open(mock=None, read_data=""): """ A helper function to create a mock to replace the use of `open`. It works for `open` called directly or used as a context manager. @@ -79,6 +80,7 @@ def mock_open(mock=None, read_data=''): `read_data` is a string for the `read` methoddline`, and `readlines` of the file handle to return. This is an empty string by default. """ + def _readlines_side_effect(*args, **kwargs): if handle.readlines.return_value is not None: return handle.readlines.return_value @@ -99,10 +101,11 @@ def _readline_side_effect(): global file_spec if file_spec is None: import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) if mock is None: - mock = MagicMock(name='open', spec=open) + mock = MagicMock(name="open", spec=open) handle = MagicMock(spec=file_spec) handle.__enter__.return_value = handle diff --git a/tests/unit/compat/unittest.py b/tests/unit/compat/unittest.py index 98f08ad6a84..df3379b8209 100644 --- a/tests/unit/compat/unittest.py +++ b/tests/unit/compat/unittest.py @@ -16,12 +16,13 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -''' +""" Compat module for Python2.7's unittest module -''' +""" import sys @@ -33,6 +34,6 @@ # Need unittest2 on python2.6 from unittest2 import * except ImportError: - print('You need unittest2 installed on python2.6.x to run tests') + print("You need unittest2 installed on python2.6.x to run tests") else: from unittest import * diff --git a/tests/unit/mock/loader.py b/tests/unit/mock/loader.py index 00a5841274b..524870cfacc 100644 --- a/tests/unit/mock/loader.py +++ b/tests/unit/mock/loader.py @@ -16,7 +16,8 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import os @@ -27,7 +28,6 @@ class DictDataLoader(DataLoader): - def __init__(self, file_mapping=None): file_mapping = {} if file_mapping is None else file_mapping assert type(file_mapping) == dict @@ -68,7 +68,7 @@ def is_directory(self, path): def list_directory(self, path): ret = [] path = to_text(path) - for x in (list(self._file_mapping.keys()) + self._known_directories): + for x in list(self._file_mapping.keys()) + self._known_directories: if x.startswith(path): if os.path.dirname(x) == path: ret.append(os.path.basename(x)) @@ -86,7 +86,7 @@ def _build_known_directories(self): self._known_directories = [] for path in self._file_mapping: dirname = os.path.dirname(path) - while dirname not in ('/', ''): + while dirname not in ("/", ""): self._add_known_directory(dirname) dirname = os.path.dirname(dirname) diff --git a/tests/unit/mock/path.py b/tests/unit/mock/path.py index 676b35ab8b5..7306033143f 100644 --- a/tests/unit/mock/path.py +++ b/tests/unit/mock/path.py @@ -1,6 +1,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from ansible_collections.community.aws.tests.unit.compat.mock import MagicMock diff --git a/tests/unit/mock/procenv.py b/tests/unit/mock/procenv.py index e516a94589b..d4aeed1d930 100644 --- a/tests/unit/mock/procenv.py +++ b/tests/unit/mock/procenv.py @@ -17,7 +17,8 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import sys @@ -31,7 +32,7 @@ @contextmanager -def swap_stdin_and_argv(stdin_data='', argv_data=tuple()): +def swap_stdin_and_argv(stdin_data="", argv_data=tuple()): """ context manager that temporarily masks the test runner's values for stdin and argv """ @@ -77,7 +78,7 @@ def swap_stdout(): class ModuleTestCase(unittest.TestCase): def setUp(self, module_args=None): if module_args is None: - module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False} + module_args = {"_ansible_remote_tmp": "/tmp", "_ansible_keep_remote_files": False} args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args)) diff --git a/tests/unit/mock/vault_helper.py b/tests/unit/mock/vault_helper.py index b54629da49a..9f33ab3885f 100644 --- a/tests/unit/mock/vault_helper.py +++ b/tests/unit/mock/vault_helper.py @@ -1,6 +1,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from ansible.module_utils._text import to_bytes @@ -9,19 +10,19 @@ class TextVaultSecret(VaultSecret): - '''A secret piece of text. ie, a password. Tracks text encoding. + """A secret piece of text. ie, a password. Tracks text encoding. The text encoding of the text may not be the default text encoding so - we keep track of the encoding so we encode it to the same bytes.''' + we keep track of the encoding so we encode it to the same bytes.""" def __init__(self, text, encoding=None, errors=None, _bytes=None): super(TextVaultSecret, self).__init__() self.text = text - self.encoding = encoding or 'utf-8' + self.encoding = encoding or "utf-8" self._bytes = _bytes - self.errors = errors or 'strict' + self.errors = errors or "strict" @property def bytes(self): - '''The text encoded with encoding, unless we specifically set _bytes.''' + """The text encoded with encoding, unless we specifically set _bytes.""" return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors) diff --git a/tests/unit/mock/yaml_helper.py b/tests/unit/mock/yaml_helper.py index a646c0241c7..dd3b9a5d810 100644 --- a/tests/unit/mock/yaml_helper.py +++ b/tests/unit/mock/yaml_helper.py @@ -1,6 +1,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import io @@ -13,6 +14,7 @@ class YamlTestUtils(object): """Mixin class to combine with a unittest.TestCase subclass.""" + def _loader(self, stream): """Vault related tests will want to override this. @@ -45,8 +47,7 @@ def _dump_load_cycle(self, obj): obj_2 = loader.get_data() # dump the gen 2 objects directory to strings - string_from_object_dump_2 = self._dump_string(obj_2, - dumper=AnsibleDumper) + string_from_object_dump_2 = self._dump_string(obj_2, dumper=AnsibleDumper) # The gen 1 and gen 2 yaml strings self.assertEqual(string_from_object_dump, string_from_object_dump_2) @@ -66,7 +67,7 @@ def _dump_load_cycle(self, obj): self.assertEqual(string_from_object_dump, string_from_object_dump_3) def _old_dump_load_cycle(self, obj): - '''Dump the passed in object to yaml, load it back up, dump again, compare.''' + """Dump the passed in object to yaml, load it back up, dump again, compare.""" stream = io.StringIO() yaml_string = self._dump_string(obj, dumper=AnsibleDumper) @@ -111,16 +112,23 @@ def _old_dump_load_cycle(self, obj): assert yaml_string == yaml_string_obj_from_stream assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string - assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream == - yaml_string_stream_obj_from_string) + assert ( + yaml_string + == yaml_string_obj_from_stream + == yaml_string_obj_from_string + == yaml_string_stream_obj_from_stream + == yaml_string_stream_obj_from_string + ) assert obj == obj_from_stream assert obj == obj_from_string assert obj == yaml_string_obj_from_stream assert obj == yaml_string_obj_from_string assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string - return {'obj': obj, - 'yaml_string': yaml_string, - 'yaml_string_from_stream': yaml_string_from_stream, - 'obj_from_stream': obj_from_stream, - 'obj_from_string': obj_from_string, - 'yaml_string_obj_from_string': yaml_string_obj_from_string} + return { + "obj": obj, + "yaml_string": yaml_string, + "yaml_string_from_stream": yaml_string_from_stream, + "obj_from_stream": obj_from_stream, + "obj_from_string": obj_from_string, + "yaml_string_obj_from_string": yaml_string_obj_from_string, + } diff --git a/tests/unit/plugins/connection/test_aws_ssm.py b/tests/unit/plugins/connection/test_aws_ssm.py index 579cafc160c..cb447645e42 100644 --- a/tests/unit/plugins/connection/test_aws_ssm.py +++ b/tests/unit/plugins/connection/test_aws_ssm.py @@ -1,5 +1,6 @@ # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from io import StringIO @@ -15,46 +16,45 @@ pytestmark = pytest.mark.skip("test_data_pipeline.py requires the python modules 'boto3' and 'botocore'") -class TestConnectionBaseClass(): - - @patch('os.path.exists') - @patch('subprocess.Popen') - @patch('select.poll') - @patch('boto3.client') +class TestConnectionBaseClass: + @patch("os.path.exists") + @patch("subprocess.Popen") + @patch("select.poll") + @patch("boto3.client") def test_plugins_connection_aws_ssm_start_session(self, boto_client, s_poll, s_popen, mock_ospe): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.get_option = MagicMock() - conn.get_option.side_effect = ['i1234', 'executable', 'abcd', 'i1234'] - conn.host = 'abc' + conn.get_option.side_effect = ["i1234", "executable", "abcd", "i1234"] + conn.host = "abc" mock_ospe.return_value = True boto3 = MagicMock() - boto3.client('ssm').return_value = MagicMock() + boto3.client("ssm").return_value = MagicMock() conn.start_session = MagicMock() conn._session_id = MagicMock() - conn._session_id.return_value = 's1' + conn._session_id.return_value = "s1" s_popen.return_value.stdin.write = MagicMock() s_poll.return_value = MagicMock() s_poll.return_value.register = MagicMock() s_popen.return_value.poll = MagicMock() s_popen.return_value.poll.return_value = None conn._stdin_readline = MagicMock() - conn._stdin_readline.return_value = 'abc123' - conn.SESSION_START = 'abc' + conn._stdin_readline.return_value = "abc123" + conn.SESSION_START = "abc" conn.start_session() - @patch('random.choice') + @patch("random.choice") def test_plugins_connection_aws_ssm_exec_command(self, r_choice): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) - r_choice.side_effect = ['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b'] + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) + r_choice.side_effect = ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"] conn.MARK_LENGTH = 5 conn._session = MagicMock() conn._session.stdin.write = MagicMock() conn._wrap_command = MagicMock() - conn._wrap_command.return_value = 'cmd1' + conn._wrap_command.return_value = "cmd1" conn._flush_stderr = MagicMock() conn._windows = MagicMock() conn._windows.return_value = True @@ -67,44 +67,44 @@ def test_plugins_connection_aws_ssm_exec_command(self, r_choice): conn._session.stdout = MagicMock() conn._session.stdout.readline = MagicMock() conn._post_process = MagicMock() - conn._post_process.return_value = 'test' - conn._session.stdout.readline.side_effect = iter(['aaaaa\n', 'Hi\n', '0\n', 'bbbbb\n']) + conn._post_process.return_value = "test" + conn._session.stdout.readline.side_effect = iter(["aaaaa\n", "Hi\n", "0\n", "bbbbb\n"]) conn.get_option = MagicMock() conn.get_option.return_value = 1 - returncode = 'a' - stdout = 'b' + returncode = "a" + stdout = "b" return (returncode, stdout, conn._flush_stderr) def test_plugins_connection_aws_ssm_prepare_terminal(self): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.is_windows = MagicMock() conn.is_windows.return_value = True def test_plugins_connection_aws_ssm_wrap_command(self): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.is_windows = MagicMock() conn.is_windows.return_value = True - return 'windows1' + return "windows1" def test_plugins_connection_aws_ssm_post_process(self): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.is_windows = MagicMock() conn.is_windows.return_value = True conn.stdout = MagicMock() returncode = 0 return returncode, conn.stdout - @patch('subprocess.Popen') + @patch("subprocess.Popen") def test_plugins_connection_aws_ssm_flush_stderr(self, s_popen): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.poll_stderr = MagicMock() conn.poll_stderr.register = MagicMock() conn.stderr = None @@ -121,37 +121,37 @@ def test_plugins_connection_aws_ssm_flush_stderr(self, s_popen): # boto3.generate_presigned_url.return_value = MagicMock() # return (boto3.generate_presigned_url.return_value) - @patch('os.path.exists') + @patch("os.path.exists") def test_plugins_connection_aws_ssm_put_file(self, mock_ospe): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn._connect = MagicMock() conn._file_transport_command = MagicMock() - conn._file_transport_command.return_value = (0, 'stdout', 'stderr') - conn.put_file('/in/file', '/out/file') + conn._file_transport_command.return_value = (0, "stdout", "stderr") + conn.put_file("/in/file", "/out/file") def test_plugins_connection_aws_ssm_fetch_file(self): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn._connect = MagicMock() conn._file_transport_command = MagicMock() - conn._file_transport_command.return_value = (0, 'stdout', 'stderr') - conn.fetch_file('/in/file', '/out/file') + conn._file_transport_command.return_value = (0, "stdout", "stderr") + conn.fetch_file("/in/file", "/out/file") - @patch('subprocess.check_output') - @patch('boto3.client') + @patch("subprocess.check_output") + @patch("boto3.client") def test_plugins_connection_file_transport_command(self, boto_client, s_check_output): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.get_option = MagicMock() - conn.get_option.side_effect = ['1', '2', '3', '4', '5'] + conn.get_option.side_effect = ["1", "2", "3", "4", "5"] conn._get_url = MagicMock() - conn._get_url.side_effect = ['url1', 'url2'] + conn._get_url.side_effect = ["url1", "url2"] boto3 = MagicMock() - boto3.client('s3').return_value = MagicMock() + boto3.client("s3").return_value = MagicMock() conn.get_option.return_value = 1 get_command = MagicMock() put_command = MagicMock() @@ -161,11 +161,11 @@ def test_plugins_connection_file_transport_command(self, boto_client, s_check_ou conn.exec_command(put_command, in_data=None, sudoable=False) conn.exec_command(get_command, in_data=None, sudoable=False) - @patch('subprocess.check_output') + @patch("subprocess.check_output") def test_plugins_connection_aws_ssm_close(self, s_check_output): pc = PlayContext() new_stdin = StringIO() - conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin) + conn = connection_loader.get("community.aws.aws_ssm", pc, new_stdin) conn.instance_id = "i-12345" conn._session_id = True conn.get_option = MagicMock() @@ -174,8 +174,8 @@ def test_plugins_connection_aws_ssm_close(self, s_check_output): conn._session.terminate = MagicMock() conn._session.communicate = MagicMock() conn._terminate_session = MagicMock() - conn._terminate_session.return_value = '' + conn._terminate_session.return_value = "" conn._session_id = MagicMock() - conn._session_id.return_value = 'a' + conn._session_id.return_value = "a" conn._client = MagicMock() conn.close() diff --git a/tests/unit/plugins/modules/conftest.py b/tests/unit/plugins/modules/conftest.py index a7d1e0475f2..523fd5837ac 100644 --- a/tests/unit/plugins/modules/conftest.py +++ b/tests/unit/plugins/modules/conftest.py @@ -1,7 +1,8 @@ # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import json @@ -18,14 +19,14 @@ def patch_ansible_module(request, mocker): if isinstance(request.param, string_types): args = request.param elif isinstance(request.param, MutableMapping): - if 'ANSIBLE_MODULE_ARGS' not in request.param: - request.param = {'ANSIBLE_MODULE_ARGS': request.param} - if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False + if "ANSIBLE_MODULE_ARGS" not in request.param: + request.param = {"ANSIBLE_MODULE_ARGS": request.param} + if "_ansible_remote_tmp" not in request.param["ANSIBLE_MODULE_ARGS"]: + request.param["ANSIBLE_MODULE_ARGS"]["_ansible_remote_tmp"] = "/tmp" + if "_ansible_keep_remote_files" not in request.param["ANSIBLE_MODULE_ARGS"]: + request.param["ANSIBLE_MODULE_ARGS"]["_ansible_keep_remote_files"] = False args = json.dumps(request.param) else: - raise Exception('Malformed data to the patch_ansible_module pytest fixture') + raise Exception("Malformed data to the patch_ansible_module pytest fixture") - mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) + mocker.patch("ansible.module_utils.basic._ANSIBLE_ARGS", to_bytes(args)) diff --git a/tests/unit/plugins/modules/test_acm_certificate.py b/tests/unit/plugins/modules/test_acm_certificate.py index 726601fe86b..95f669f7c42 100644 --- a/tests/unit/plugins/modules/test_acm_certificate.py +++ b/tests/unit/plugins/modules/test_acm_certificate.py @@ -15,7 +15,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from pprint import pprint @@ -26,7 +27,6 @@ def test_chain_compare(): - # The functions we're testing take module as an argument # Just so they can call module.fail_json # Let's just use None for the unit tests, @@ -34,14 +34,14 @@ def test_chain_compare(): # And if they do, fail_json is not applicable module = None - fixture_suffix = 'tests/unit/plugins/modules/fixtures/certs' + fixture_suffix = "tests/unit/plugins/modules/fixtures/certs" # Test chain split function on super simple (invalid) certs - expected = ['aaa', 'bbb', 'ccc'] + expected = ["aaa", "bbb", "ccc"] - for fname in ['simple-chain-a.cert', 'simple-chain-b.cert']: - path = fixture_suffix + '/' + fname - with open(path, 'r') as f: + for fname in ["simple-chain-a.cert", "simple-chain-b.cert"]: + path = fixture_suffix + "/" + fname + with open(path, "r") as f: pem = to_text(f.read()) actual = pem_chain_split(module, pem) actual = [a.strip() for a in actual] @@ -55,71 +55,55 @@ def test_chain_compare(): # Now test real chains # chains with same same_as should be considered equal test_chains = [ - { # Original Cert chain - 'path': fixture_suffix + '/chain-1.0.cert', - 'same_as': 1, - 'length': 3 - }, - { # Same as 1.0, but longer PEM lines - 'path': fixture_suffix + '/chain-1.1.cert', - 'same_as': 1, - 'length': 3 - }, + {"path": fixture_suffix + "/chain-1.0.cert", "same_as": 1, "length": 3}, # Original Cert chain + {"path": fixture_suffix + "/chain-1.1.cert", "same_as": 1, "length": 3}, # Same as 1.0, but longer PEM lines { # Same as 1.0, but without the stuff before each -------- - 'path': fixture_suffix + '/chain-1.2.cert', - 'same_as': 1, - 'length': 3 + "path": fixture_suffix + "/chain-1.2.cert", + "same_as": 1, + "length": 3, }, { # Same as 1.0, but in a different order, so should be considered different - 'path': fixture_suffix + '/chain-1.3.cert', - 'same_as': 2, - 'length': 3 + "path": fixture_suffix + "/chain-1.3.cert", + "same_as": 2, + "length": 3, }, { # Same as 1.0, but with last link missing - 'path': fixture_suffix + '/chain-1.4.cert', - 'same_as': 3, - 'length': 2 + "path": fixture_suffix + "/chain-1.4.cert", + "same_as": 3, + "length": 2, }, { # Completely different cert chain to all the others - 'path': fixture_suffix + '/chain-4.cert', - 'same_as': 4, - 'length': 3 - }, - { # Single cert - 'path': fixture_suffix + '/a.pem', - 'same_as': 5, - 'length': 1 + "path": fixture_suffix + "/chain-4.cert", + "same_as": 4, + "length": 3, }, - { # a different, single cert - 'path': fixture_suffix + '/b.pem', - 'same_as': 6, - 'length': 1 - } + {"path": fixture_suffix + "/a.pem", "same_as": 5, "length": 1}, # Single cert + {"path": fixture_suffix + "/b.pem", "same_as": 6, "length": 1}, # a different, single cert ] for chain in test_chains: - with open(chain['path'], 'r') as f: - chain['pem_text'] = to_text(f.read()) + with open(chain["path"], "r") as f: + chain["pem_text"] = to_text(f.read()) # Test to make sure our regex isn't too greedy - chain['split'] = pem_chain_split(module, chain['pem_text']) - if len(chain['split']) != chain['length']: + chain["split"] = pem_chain_split(module, chain["pem_text"]) + if len(chain["split"]) != chain["length"]: print("Cert before split") - print(chain['pem_text']) + print(chain["pem_text"]) print("Cert after split") - pprint(chain['split']) - print("path: %s" % chain['path']) - print("Expected chain length: %d" % chain['length']) - print("Actual chain length: %d" % len(chain['split'])) - raise AssertionError("Chain %s was not split properly" % chain['path']) + pprint(chain["split"]) + print("path: %s" % chain["path"]) + print("Expected chain length: %d" % chain["length"]) + print("Actual chain length: %d" % len(chain["split"])) + raise AssertionError("Chain %s was not split properly" % chain["path"]) for chain_a in test_chains: for chain_b in test_chains: - expected = (chain_a['same_as'] == chain_b['same_as']) + expected = chain_a["same_as"] == chain_b["same_as"] # Now test the comparison function - actual = chain_compare(module, chain_a['pem_text'], chain_b['pem_text']) + actual = chain_compare(module, chain_a["pem_text"], chain_b["pem_text"]) if expected != actual: - print("Error, unexpected comparison result between \n%s\nand\n%s" % (chain_a['path'], chain_b['path'])) + print("Error, unexpected comparison result between \n%s\nand\n%s" % (chain_a["path"], chain_b["path"])) print("Expected %s got %s" % (str(expected), str(actual))) assert expected == actual diff --git a/tests/unit/plugins/modules/test_api_gateway.py b/tests/unit/plugins/modules/test_api_gateway.py index a6f2c3e91d6..d6db6892153 100644 --- a/tests/unit/plugins/modules/test_api_gateway.py +++ b/tests/unit/plugins/modules/test_api_gateway.py @@ -5,7 +5,8 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import sys @@ -25,7 +26,7 @@ def fake_exit_json(self, **kwargs): - """ store the kwargs given to exit_json rather than putting them out to stdout""" + """store the kwargs given to exit_json rather than putting them out to stdout""" global exit_return_dict exit_return_dict = kwargs sys.exit(0) @@ -33,7 +34,6 @@ def fake_exit_json(self, **kwargs): def test_upload_api(monkeypatch): class FakeConnection: - def put_rest_api(self, *args, **kwargs): assert kwargs["body"] == "the-swagger-text-is-fake" return {"msg": "success!"} @@ -46,25 +46,29 @@ def return_fake_connection(*args, **kwargs): monkeypatch.setattr(aws_modules, "boto3_conn", return_fake_connection) monkeypatch.setattr(aws_modules.AnsibleAWSModule, "exit_json", fake_exit_json) - set_module_args({ - "api_id": "fred", - "state": "present", - "swagger_text": "the-swagger-text-is-fake", - "region": 'mars-north-1', - "_ansible_tmpdir": "/tmp/ansibl-abcdef", - }) + set_module_args( + { + "api_id": "fred", + "state": "present", + "swagger_text": "the-swagger-text-is-fake", + "region": "mars-north-1", + "_ansible_tmpdir": "/tmp/ansibl-abcdef", + } + ) with pytest.raises(SystemExit): agw.main() assert exit_return_dict["changed"] def test_warn_if_region_not_specified(): - - set_module_args({ - "name": "api_gateway", - "state": "present", - "runtime": 'python2.7', - "role": 'arn:aws:iam::123456789012:role/lambda_basic_execution', - "handler": 'lambda_python.my_handler'}) + set_module_args( + { + "name": "api_gateway", + "state": "present", + "runtime": "python2.7", + "role": "arn:aws:iam::123456789012:role/lambda_basic_execution", + "handler": "lambda_python.my_handler", + } + ) with pytest.raises(SystemExit): print(agw.main()) diff --git a/tests/unit/plugins/modules/test_data_pipeline.py b/tests/unit/plugins/modules/test_data_pipeline.py index 1a188e8ed57..2d6f7500209 100644 --- a/tests/unit/plugins/modules/test_data_pipeline.py +++ b/tests/unit/plugins/modules/test_data_pipeline.py @@ -4,7 +4,8 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import collections @@ -20,8 +21,11 @@ pass # Magic... Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep + +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 from ansible_collections.community.aws.plugins.modules import data_pipeline @@ -34,7 +38,7 @@ class FailException(Exception): pass -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def dp_setup(): """ Yield a FakeModule object, data pipeline id of a vanilla data pipeline, and data pipeline objects @@ -44,41 +48,41 @@ def dp_setup(): Dependencies = collections.namedtuple("Dependencies", ["module", "data_pipeline_id", "objects"]) # get objects to use to test populating and activating the data pipeline - if not os.getenv('PLACEBO_RECORD'): - objects = [{"name": "Every 1 day", - "id": "DefaultSchedule", - "fields": []}, - {"name": "Default", - "id": "Default", - "fields": []}] + if not os.getenv("PLACEBO_RECORD"): + objects = [ + {"name": "Every 1 day", "id": "DefaultSchedule", "fields": []}, + {"name": "Default", "id": "Default", "fields": []}, + ] else: - s3 = boto3.client('s3') + s3 = boto3.client("s3") data = s3.get_object(Bucket="ansible-test-datapipeline", Key="pipeline-object/new.json") - objects = json.loads(to_text(data['Body'].read())) + objects = json.loads(to_text(data["Body"].read())) # create a module with vanilla data pipeline parameters - params = {'name': 'ansible-test-create-pipeline', - 'description': 'ansible-datapipeline-unit-test', - 'state': 'present', - 'timeout': 300, - 'objects': [], - 'tags': {}, - 'parameters': [], - 'values': []} + params = { + "name": "ansible-test-create-pipeline", + "description": "ansible-datapipeline-unit-test", + "state": "present", + "timeout": 300, + "objects": [], + "tags": {}, + "parameters": [], + "values": [], + } module = FakeModule(**params) # yield a module, the data pipeline id, and the data pipeline objects (that are not yet defining the vanilla data pipeline) - if not os.getenv('PLACEBO_RECORD'): - yield Dependencies(module=module, data_pipeline_id='df-0590406117G8DPQZY2HA', objects=objects) + if not os.getenv("PLACEBO_RECORD"): + yield Dependencies(module=module, data_pipeline_id="df-0590406117G8DPQZY2HA", objects=objects) else: - connection = boto3.client('datapipeline') + connection = boto3.client("datapipeline") _changed, result = data_pipeline.create_pipeline(connection, module) - data_pipeline_id = result['data_pipeline']['pipeline_id'] + data_pipeline_id = result["data_pipeline"]["pipeline_id"] yield Dependencies(module=module, data_pipeline_id=data_pipeline_id, objects=objects) # remove data pipeline - if os.getenv('PLACEBO_RECORD'): - module.params.update(state='absent') + if os.getenv("PLACEBO_RECORD"): + module.params.update(state="absent") data_pipeline.delete_pipeline(connection, module) @@ -89,7 +93,7 @@ def __init__(self, **kwargs): def fail_json(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise FailException('FAIL') + raise FailException("FAIL") def exit_json(self, *args, **kwargs): self.exit_args = args @@ -97,91 +101,101 @@ def exit_json(self, *args, **kwargs): def test_create_pipeline_already_exists(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") changed, result = data_pipeline.create_pipeline(connection, dp_setup.module) assert changed is False - assert "Data Pipeline ansible-test-create-pipeline is present" in result['msg'] + assert "Data Pipeline ansible-test-create-pipeline is present" in result["msg"] def test_pipeline_field(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") pipeline_field_info = data_pipeline.pipeline_field(connection, dp_setup.data_pipeline_id, "@pipelineState") assert pipeline_field_info == "PENDING" def test_define_pipeline(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') - changed, result = data_pipeline.define_pipeline(connection, dp_setup.module, dp_setup.objects, dp_setup.data_pipeline_id) + connection = placeboify.client("datapipeline") + changed, result = data_pipeline.define_pipeline( + connection, dp_setup.module, dp_setup.objects, dp_setup.data_pipeline_id + ) assert changed is True - assert 'has been updated' in result + assert "has been updated" in result def test_deactivate_pipeline(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") _changed, result = data_pipeline.deactivate_pipeline(connection, dp_setup.module) # XXX possible bug # assert changed is True - assert "Data Pipeline ansible-test-create-pipeline deactivated" in result['msg'] + assert "Data Pipeline ansible-test-create-pipeline deactivated" in result["msg"] def test_activate_without_population(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") with pytest.raises(FailException): _changed, _result = data_pipeline.activate_pipeline(connection, dp_setup.module) - assert dp_setup.module.exit_kwargs.get('msg') == "You need to populate your pipeline before activation." + assert dp_setup.module.exit_kwargs.get("msg") == "You need to populate your pipeline before activation." def test_create_pipeline(placeboify, maybe_sleep): - connection = placeboify.client('datapipeline') - params = {'name': 'ansible-unittest-create-pipeline', - 'description': 'ansible-datapipeline-unit-test', - 'state': 'present', - 'timeout': 300, - 'tags': {}} + connection = placeboify.client("datapipeline") + params = { + "name": "ansible-unittest-create-pipeline", + "description": "ansible-datapipeline-unit-test", + "state": "present", + "timeout": 300, + "tags": {}, + } m = FakeModule(**params) changed, result = data_pipeline.create_pipeline(connection, m) assert changed is True - assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline created." + assert result["msg"] == "Data Pipeline ansible-unittest-create-pipeline created." data_pipeline.delete_pipeline(connection, m) def test_create_pipeline_with_tags(placeboify, maybe_sleep): - connection = placeboify.client('datapipeline') - params = {'name': 'ansible-unittest-create-pipeline_tags', - 'description': 'ansible-datapipeline-unit-test', - 'state': 'present', - 'tags': {'ansible': 'test'}, - 'timeout': 300} + connection = placeboify.client("datapipeline") + params = { + "name": "ansible-unittest-create-pipeline_tags", + "description": "ansible-datapipeline-unit-test", + "state": "present", + "tags": {"ansible": "test"}, + "timeout": 300, + } m = FakeModule(**params) changed, result = data_pipeline.create_pipeline(connection, m) assert changed is True - assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline_tags created." + assert result["msg"] == "Data Pipeline ansible-unittest-create-pipeline_tags created." data_pipeline.delete_pipeline(connection, m) def test_delete_nonexistent_pipeline(placeboify, maybe_sleep): - connection = placeboify.client('datapipeline') - params = {'name': 'ansible-test-nonexistent', - 'description': 'ansible-test-nonexistent', - 'state': 'absent', - 'objects': [], - 'tags': {'ansible': 'test'}, - 'timeout': 300} + connection = placeboify.client("datapipeline") + params = { + "name": "ansible-test-nonexistent", + "description": "ansible-test-nonexistent", + "state": "absent", + "objects": [], + "tags": {"ansible": "test"}, + "timeout": 300, + } m = FakeModule(**params) changed, _result = data_pipeline.delete_pipeline(connection, m) assert changed is False def test_delete_pipeline(placeboify, maybe_sleep): - connection = placeboify.client('datapipeline') - params = {'name': 'ansible-test-nonexistent', - 'description': 'ansible-test-nonexistent', - 'state': 'absent', - 'objects': [], - 'tags': {'ansible': 'test'}, - 'timeout': 300} + connection = placeboify.client("datapipeline") + params = { + "name": "ansible-test-nonexistent", + "description": "ansible-test-nonexistent", + "state": "absent", + "objects": [], + "tags": {"ansible": "test"}, + "timeout": 300, + } m = FakeModule(**params) data_pipeline.create_pipeline(connection, m) changed, _result = data_pipeline.delete_pipeline(connection, m) @@ -189,29 +203,29 @@ def test_delete_pipeline(placeboify, maybe_sleep): def test_build_unique_id_different(): - m = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id'}) - m2 = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id-different'}) + m = FakeModule(**{"name": "ansible-unittest-1", "description": "test-unique-id"}) + m2 = FakeModule(**{"name": "ansible-unittest-1", "description": "test-unique-id-different"}) assert data_pipeline.build_unique_id(m) != data_pipeline.build_unique_id(m2) def test_build_unique_id_same(): - m = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id', 'tags': {'ansible': 'test'}}) - m2 = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id', 'tags': {'ansible': 'test'}}) + m = FakeModule(**{"name": "ansible-unittest-1", "description": "test-unique-id", "tags": {"ansible": "test"}}) + m2 = FakeModule(**{"name": "ansible-unittest-1", "description": "test-unique-id", "tags": {"ansible": "test"}}) assert data_pipeline.build_unique_id(m) == data_pipeline.build_unique_id(m2) def test_build_unique_id_obj(): # check that the object can be different and the unique id should be the same; should be able to modify objects - m = FakeModule(**{'name': 'ansible-unittest-1', 'objects': [{'first': 'object'}]}) - m2 = FakeModule(**{'name': 'ansible-unittest-1', 'objects': [{'second': 'object'}]}) + m = FakeModule(**{"name": "ansible-unittest-1", "objects": [{"first": "object"}]}) + m2 = FakeModule(**{"name": "ansible-unittest-1", "objects": [{"second": "object"}]}) assert data_pipeline.build_unique_id(m) == data_pipeline.build_unique_id(m2) def test_format_tags(): - unformatted_tags = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'} + unformatted_tags = {"key1": "val1", "key2": "val2", "key3": "val3"} formatted_tags = data_pipeline.format_tags(unformatted_tags) for tag_set in formatted_tags: - assert unformatted_tags[tag_set['key']] == tag_set['value'] + assert unformatted_tags[tag_set["key"]] == tag_set["value"] def test_format_empty_tags(): @@ -221,45 +235,44 @@ def test_format_empty_tags(): def test_pipeline_description(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") dp_id = dp_setup.data_pipeline_id pipelines = data_pipeline.pipeline_description(connection, dp_id) - assert dp_id == pipelines['pipelineDescriptionList'][0]['pipelineId'] + assert dp_id == pipelines["pipelineDescriptionList"][0]["pipelineId"] def test_pipeline_description_nonexistent(placeboify, maybe_sleep): hypothetical_pipeline_id = "df-015440025PF7YGLDK47C" - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") with pytest.raises(data_pipeline.DataPipelineNotFound): data_pipeline.pipeline_description(connection, hypothetical_pipeline_id) def test_check_dp_exists_true(placeboify, maybe_sleep, dp_setup): - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") exists = data_pipeline.check_dp_exists(connection, dp_setup.data_pipeline_id) assert exists is True def test_check_dp_exists_false(placeboify, maybe_sleep): hypothetical_pipeline_id = "df-015440025PF7YGLDK47C" - connection = placeboify.client('datapipeline') + connection = placeboify.client("datapipeline") exists = data_pipeline.check_dp_exists(connection, hypothetical_pipeline_id) assert exists is False def test_check_dp_status(placeboify, maybe_sleep, dp_setup): - inactive_states = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING'] - connection = placeboify.client('datapipeline') + inactive_states = ["INACTIVE", "PENDING", "FINISHED", "DELETING"] + connection = placeboify.client("datapipeline") state = data_pipeline.check_dp_status(connection, dp_setup.data_pipeline_id, inactive_states) assert state is True def test_activate_pipeline(placeboify, maybe_sleep, dp_setup): # use objects to define pipeline before activating - connection = placeboify.client('datapipeline') - data_pipeline.define_pipeline(connection, - module=dp_setup.module, - objects=dp_setup.objects, - dp_id=dp_setup.data_pipeline_id) + connection = placeboify.client("datapipeline") + data_pipeline.define_pipeline( + connection, module=dp_setup.module, objects=dp_setup.objects, dp_id=dp_setup.data_pipeline_id + ) changed, _result = data_pipeline.activate_pipeline(connection, dp_setup.module) assert changed is True diff --git a/tests/unit/plugins/modules/test_directconnect_confirm_connection.py b/tests/unit/plugins/modules/test_directconnect_confirm_connection.py index 63804415db1..57502ade384 100644 --- a/tests/unit/plugins/modules/test_directconnect_confirm_connection.py +++ b/tests/unit/plugins/modules/test_directconnect_confirm_connection.py @@ -1,8 +1,10 @@ # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import pytest + try: from botocore.exceptions import ClientError except ImportError: @@ -19,10 +21,12 @@ from ansible_collections.community.aws.plugins.modules import directconnect_confirm_connection if not HAS_BOTO3: - pytestmark = pytest.mark.skip("test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules") + pytestmark = pytest.mark.skip( + "test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules" + ) -@patch('ansible_collections.amazon.aws.plugins.module_utils.core.HAS_BOTO3', new=True) +@patch("ansible_collections.amazon.aws.plugins.module_utils.core.HAS_BOTO3", new=True) @patch.object(directconnect_confirm_connection.AnsibleAWSModule, "client") class TestAWSDirectConnectConfirmConnection(ModuleTestCase): def test_missing_required_parameters(self, *args): @@ -45,22 +49,18 @@ def test_get_by_connection_id(self, mock_client): "connectionName": "ansible-test-connection", "bandwidth": "1Gbps", "ownerAccount": "123456789012", - "region": "us-west-2" + "region": "us-west-2", } ] } - set_module_args({ - "connection_id": "dxcon-fgq9rgot" - }) + set_module_args({"connection_id": "dxcon-fgq9rgot"}) with self.assertRaises(AnsibleExitJson) as exec_info: directconnect_confirm_connection.main() result = exec_info.exception.args[0] assert result["changed"] is False assert result["connection_state"] == "requested" - mock_client.return_value.describe_connections.assert_has_calls([ - call(connectionId="dxcon-fgq9rgot") - ]) + mock_client.return_value.describe_connections.assert_has_calls([call(connectionId="dxcon-fgq9rgot")]) mock_client.return_value.confirm_connection.assert_not_called() def test_get_by_name(self, mock_client): @@ -73,39 +73,31 @@ def test_get_by_name(self, mock_client): "connectionName": "ansible-test-connection", "bandwidth": "1Gbps", "ownerAccount": "123456789012", - "region": "us-west-2" + "region": "us-west-2", } ] } - set_module_args({ - "name": "ansible-test-connection" - }) + set_module_args({"name": "ansible-test-connection"}) with self.assertRaises(AnsibleExitJson) as exec_info: directconnect_confirm_connection.main() result = exec_info.exception.args[0] assert result["changed"] is False assert result["connection_state"] == "requested" - mock_client.return_value.describe_connections.assert_has_calls([ - call(), - call(connectionId="dxcon-fgq9rgot") - ]) + mock_client.return_value.describe_connections.assert_has_calls([call(), call(connectionId="dxcon-fgq9rgot")]) mock_client.return_value.confirm_connection.assert_not_called() def test_missing_connection_id(self, mock_client): mock_client.return_value.describe_connections.side_effect = ClientError( - {'Error': {'Code': 'ResourceNotFoundException'}}, 'DescribeConnection') - set_module_args({ - "connection_id": "dxcon-aaaabbbb" - }) + {"Error": {"Code": "ResourceNotFoundException"}}, "DescribeConnection" + ) + set_module_args({"connection_id": "dxcon-aaaabbbb"}) with self.assertRaises(AnsibleFailJson) as exec_info: directconnect_confirm_connection.main() result = exec_info.exception.args[0] assert result["failed"] is True - mock_client.return_value.describe_connections.assert_has_calls([ - call(connectionId="dxcon-aaaabbbb") - ]) + mock_client.return_value.describe_connections.assert_has_calls([call(connectionId="dxcon-aaaabbbb")]) def test_missing_name(self, mock_client): mock_client.return_value.describe_connections.return_value = { @@ -117,21 +109,17 @@ def test_missing_name(self, mock_client): "connectionName": "ansible-test-connection", "bandwidth": "1Gbps", "ownerAccount": "123456789012", - "region": "us-west-2" + "region": "us-west-2", } ] } - set_module_args({ - "name": "foobar" - }) + set_module_args({"name": "foobar"}) with self.assertRaises(AnsibleFailJson) as exec_info: directconnect_confirm_connection.main() result = exec_info.exception.args[0] assert result["failed"] is True - mock_client.return_value.describe_connections.assert_has_calls([ - call() - ]) + mock_client.return_value.describe_connections.assert_has_calls([call()]) def test_confirm(self, mock_client): mock_client.return_value.describe_connections.return_value = { @@ -143,22 +131,22 @@ def test_confirm(self, mock_client): "connectionName": "ansible-test-connection", "bandwidth": "1Gbps", "ownerAccount": "123456789012", - "region": "us-west-2" + "region": "us-west-2", } ] } mock_client.return_value.confirm_connection.return_value = [{}] - set_module_args({ - "connection_id": "dxcon-fgq9rgot" - }) + set_module_args({"connection_id": "dxcon-fgq9rgot"}) with self.assertRaises(AnsibleExitJson) as exec_info: directconnect_confirm_connection.main() result = exec_info.exception.args[0] assert result["changed"] is True - mock_client.return_value.describe_connections.assert_has_calls([ - call(connectionId="dxcon-fgq9rgot"), - call(connectionId="dxcon-fgq9rgot"), - call(connectionId="dxcon-fgq9rgot") - ]) + mock_client.return_value.describe_connections.assert_has_calls( + [ + call(connectionId="dxcon-fgq9rgot"), + call(connectionId="dxcon-fgq9rgot"), + call(connectionId="dxcon-fgq9rgot"), + ] + ) mock_client.return_value.confirm_connection.assert_called_once_with(connectionId="dxcon-fgq9rgot") diff --git a/tests/unit/plugins/modules/test_directconnect_connection.py b/tests/unit/plugins/modules/test_directconnect_connection.py index 65ba0a3f0c0..05a187177da 100644 --- a/tests/unit/plugins/modules/test_directconnect_connection.py +++ b/tests/unit/plugins/modules/test_directconnect_connection.py @@ -4,81 +4,86 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import pytest from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 + # Magic... Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep + +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify from ansible_collections.community.aws.plugins.modules import directconnect_connection if not HAS_BOTO3: - pytestmark = pytest.mark.skip("test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules") + pytestmark = pytest.mark.skip( + "test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules" + ) # When rerecording these tests, create a stand alone connection with default values in us-west-2 # with the name ansible-test-connection and set connection_id to the appropriate value connection_id = "dxcon-fgq9rgot" -connection_name = 'ansible-test-connection' +connection_name = "ansible-test-connection" def test_connection_status(placeboify, maybe_sleep): - client = placeboify.client('directconnect') - status = directconnect_connection.connection_status(client, connection_id)['connection'] - assert status['connectionName'] == connection_name - assert status['connectionId'] == connection_id + client = placeboify.client("directconnect") + status = directconnect_connection.connection_status(client, connection_id)["connection"] + assert status["connectionName"] == connection_name + assert status["connectionId"] == connection_id def test_connection_exists_by_id(placeboify, maybe_sleep): - client = placeboify.client('directconnect') + client = placeboify.client("directconnect") exists = directconnect_connection.connection_exists(client, connection_id) assert exists == connection_id def test_connection_exists_by_name(placeboify, maybe_sleep): - client = placeboify.client('directconnect') + client = placeboify.client("directconnect") exists = directconnect_connection.connection_exists(client, None, connection_name) assert exists == connection_id def test_connection_does_not_exist(placeboify, maybe_sleep): - client = placeboify.client('directconnect') - exists = directconnect_connection.connection_exists(client, 'dxcon-notthere') + client = placeboify.client("directconnect") + exists = directconnect_connection.connection_exists(client, "dxcon-notthere") assert exists is False def test_changed_properties(placeboify, maybe_sleep): - client = placeboify.client('directconnect') - status = directconnect_connection.connection_status(client, connection_id)['connection'] + client = placeboify.client("directconnect") + status = directconnect_connection.connection_status(client, connection_id)["connection"] location = "differentlocation" - bandwidth = status['bandwidth'] + bandwidth = status["bandwidth"] assert directconnect_connection.changed_properties(status, location, bandwidth) is True def test_associations_are_not_updated(placeboify, maybe_sleep): - client = placeboify.client('directconnect') - status = directconnect_connection.connection_status(client, connection_id)['connection'] - lag_id = status.get('lagId') + client = placeboify.client("directconnect") + status = directconnect_connection.connection_status(client, connection_id)["connection"] + lag_id = status.get("lagId") assert directconnect_connection.update_associations(client, status, connection_id, lag_id) is False def test_create_and_delete(placeboify, maybe_sleep): - client = placeboify.client('directconnect') + client = placeboify.client("directconnect") created_conn = verify_create_works(placeboify, maybe_sleep, client) verify_delete_works(placeboify, maybe_sleep, client, created_conn) def verify_create_works(placeboify, maybe_sleep, client): - created = directconnect_connection.create_connection(client=client, - location="EqSE2", - bandwidth="1Gbps", - name="ansible-test-2", - lag_id=None) - assert created.startswith('dxcon') + created = directconnect_connection.create_connection( + client=client, location="EqSE2", bandwidth="1Gbps", name="ansible-test-2", lag_id=None + ) + assert created.startswith("dxcon") return created diff --git a/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py b/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py index 90c8d9604b6..6269e9fe073 100644 --- a/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py +++ b/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py @@ -4,7 +4,8 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import pytest @@ -12,8 +13,11 @@ import collections # Magic... Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep + +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn @@ -22,22 +26,25 @@ from ansible_collections.community.aws.plugins.modules import directconnect_link_aggregation_group as lag_module if not HAS_BOTO3: - pytestmark = pytest.mark.skip("test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules") + pytestmark = pytest.mark.skip( + "test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules" + ) @pytest.fixture(scope="module") def dependencies(): - # each LAG dict will contain the keys: module, connections, virtual_interfaces Dependencies = collections.namedtuple("Dependencies", ["lag_1", "lag_2"]) lag_1 = dict() lag_2 = dict() - vanilla_params = {"name": "ansible_lag_1", - "location": "EqSe2", - "num_connections": 1, - "min_links": 0, - "bandwidth": "1Gbps"} + vanilla_params = { + "name": "ansible_lag_1", + "location": "EqSe2", + "num_connections": 1, + "min_links": 0, + "bandwidth": "1Gbps", + } for lag in ("ansible_lag_1", "ansible_lag_2"): params = dict(vanilla_params) @@ -49,10 +56,19 @@ def dependencies(): if os.getenv("PLACEBO_RECORD"): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(lag_1["module"], boto3=True) - client = boto3_conn(lag_1["module"], conn_type="client", resource="directconnect", region=region, endpoint=ec2_url, **aws_connect_kwargs) + client = boto3_conn( + lag_1["module"], + conn_type="client", + resource="directconnect", + region=region, + endpoint=ec2_url, + **aws_connect_kwargs, + ) # See if link aggregation groups exist for name in ("ansible_lag_1", "ansible_lag_2"): - lag_id = lag_module.create_lag(client, num_connections=1, location="EqSe2", bandwidth="1Gbps", name=name, connection_id=None) + lag_id = lag_module.create_lag( + client, num_connections=1, location="EqSe2", bandwidth="1Gbps", name=name, connection_id=None + ) if name == "ansible_lag_1": lag_1["lag_id"] = lag_id lag_1["name"] = name @@ -87,10 +103,7 @@ def exit_json(self, *args, **kwargs): def test_nonexistent_lag_status(placeboify, maybe_sleep): client = placeboify.client("directconnect") - exists = lag_module.lag_exists(client=client, - lag_id="doesntexist", - lag_name="doesntexist", - verify=True) + exists = lag_module.lag_exists(client=client, lag_id="doesntexist", lag_name="doesntexist", verify=True) assert not exists @@ -103,28 +116,19 @@ def test_lag_status(placeboify, maybe_sleep, dependencies): def test_lag_exists(placeboify, maybe_sleep, dependencies): client = placeboify.client("directconnect") - exists = lag_module.lag_exists(client=client, - lag_id=dependencies.lag_1.get("lag_id"), - lag_name=None, - verify=True) + exists = lag_module.lag_exists(client=client, lag_id=dependencies.lag_1.get("lag_id"), lag_name=None, verify=True) assert exists def test_lag_exists_using_name(placeboify, maybe_sleep, dependencies): client = placeboify.client("directconnect") - exists = lag_module.lag_exists(client=client, - lag_id=None, - lag_name=dependencies.lag_1.get("name"), - verify=True) + exists = lag_module.lag_exists(client=client, lag_id=None, lag_name=dependencies.lag_1.get("name"), verify=True) assert exists def test_nonexistent_lag_does_not_exist(placeboify, maybe_sleep): client = placeboify.client("directconnect") - exists = lag_module.lag_exists(client=client, - lag_id="dxlag-XXXXXXXX", - lag_name="doesntexist", - verify=True) + exists = lag_module.lag_exists(client=client, lag_id="dxlag-XXXXXXXX", lag_name="doesntexist", verify=True) assert not exists @@ -143,19 +147,21 @@ def test_lag_changed_true_no(placeboify, maybe_sleep, dependencies): def test_update_lag(placeboify, maybe_sleep, dependencies): client = placeboify.client("directconnect") status_before = lag_module.lag_status(client=client, lag_id=dependencies.lag_2.get("lag_id")) - lag_module.update_lag(client, - lag_id=dependencies.lag_2.get("lag_id"), - lag_name="ansible_lag_2_update", - min_links=0, - wait=False, - wait_timeout=0, - num_connections=1) + lag_module.update_lag( + client, + lag_id=dependencies.lag_2.get("lag_id"), + lag_name="ansible_lag_2_update", + min_links=0, + wait=False, + wait_timeout=0, + num_connections=1, + ) status_after = lag_module.lag_status(client=client, lag_id=dependencies.lag_2.get("lag_id")) assert status_before != status_after # remove the lag name from the statuses and verify it was the only thing changed - del status_before['lagName'] - del status_after['lagName'] + del status_before["lagName"] + del status_after["lagName"] assert status_before == status_after diff --git a/tests/unit/plugins/modules/test_directconnect_virtual_interface.py b/tests/unit/plugins/modules/test_directconnect_virtual_interface.py index 4f0086421f8..096739f0ac4 100644 --- a/tests/unit/plugins/modules/test_directconnect_virtual_interface.py +++ b/tests/unit/plugins/modules/test_directconnect_virtual_interface.py @@ -4,20 +4,27 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import pytest from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 + # Magic... Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep + +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify from ansible_collections.community.aws.plugins.modules import directconnect_virtual_interface if not HAS_BOTO3: - pytestmark = pytest.mark.skip("test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules") + pytestmark = pytest.mark.skip( + "test_directconnect_confirm_connection.py requires the `boto3` and `botocore` modules" + ) class FailException(Exception): @@ -46,10 +53,7 @@ def test_find_unique_vi_by_connection_id(placeboify, maybe_sleep): def test_find_unique_vi_by_vi_id(placeboify, maybe_sleep): client = placeboify.client("directconnect") - vi_id = directconnect_virtual_interface.find_unique_vi(client, - None, - "dxvif-aaaaaaaaa", - None) + vi_id = directconnect_virtual_interface.find_unique_vi(client, None, "dxvif-aaaaaaaaa", None) assert vi_id == "dxvif-aaaaaaaa" @@ -61,47 +65,38 @@ def test_find_unique_vi_by_name(placeboify, maybe_sleep): def test_find_unique_vi_returns_multiple(placeboify, maybe_sleep): client = placeboify.client("directconnect") - module = FakeModule(state="present", - id_to_associate="dxcon-aaaaaaaa", - public=False, - name=None) + module = FakeModule(state="present", id_to_associate="dxcon-aaaaaaaa", public=False, name=None) with pytest.raises(FailException): - directconnect_virtual_interface.ensure_state( - client, - module - ) + directconnect_virtual_interface.ensure_state(client, module) assert "Multiple virtual interfaces were found" in module.exit_kwargs["msg"] def test_find_unique_vi_returns_missing_for_vi_id(placeboify, maybe_sleep): client = placeboify.client("directconnect") - module = FakeModule(state="present", - id_to_associate=None, - public=False, - name=None, - virtual_interface_id="dxvif-aaaaaaaa") + module = FakeModule( + state="present", id_to_associate=None, public=False, name=None, virtual_interface_id="dxvif-aaaaaaaa" + ) with pytest.raises(FailException): - directconnect_virtual_interface.ensure_state( - client, - module - ) + directconnect_virtual_interface.ensure_state(client, module) assert "The virtual interface dxvif-aaaaaaaa does not exist" in module.exit_kwargs["msg"] def test_construct_public_vi(): - module = FakeModule(state="present", - id_to_associate=None, - public=True, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id="xxxx", - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="present", + id_to_associate=None, + public=True, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id="xxxx", + direct_connect_gateway_id="yyyy", + ) vi = directconnect_virtual_interface.assemble_params_for_creating_vi(module.params) assert vi == { "virtualInterfaceName": "aaaaaaaa", @@ -111,24 +106,26 @@ def test_construct_public_vi(): "amazonAddress": "169.254.0.2/30", "customerAddress": "169.254.0.1/30", "addressFamily": "ipv4", - "routeFilterPrefixes": [{"cidr": "10.88.0.0/30"}] + "routeFilterPrefixes": [{"cidr": "10.88.0.0/30"}], } def test_construct_private_vi_with_virtual_gateway_id(): - module = FakeModule(state="present", - id_to_associate=None, - public=False, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id="xxxx", - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="present", + id_to_associate=None, + public=False, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id="xxxx", + direct_connect_gateway_id="yyyy", + ) vi = directconnect_virtual_interface.assemble_params_for_creating_vi(module.params) assert vi == { "virtualInterfaceName": "aaaaaaaa", @@ -138,24 +135,26 @@ def test_construct_private_vi_with_virtual_gateway_id(): "amazonAddress": "169.254.0.2/30", "customerAddress": "169.254.0.1/30", "addressFamily": "ipv4", - "virtualGatewayId": "xxxx" + "virtualGatewayId": "xxxx", } def test_construct_private_vi_with_direct_connect_gateway_id(): - module = FakeModule(state="present", - id_to_associate=None, - public=False, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id=None, - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="present", + id_to_associate=None, + public=False, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id=None, + direct_connect_gateway_id="yyyy", + ) vi = directconnect_virtual_interface.assemble_params_for_creating_vi(module.params) print(vi) assert vi == { @@ -166,26 +165,28 @@ def test_construct_private_vi_with_direct_connect_gateway_id(): "amazonAddress": "169.254.0.2/30", "customerAddress": "169.254.0.1/30", "addressFamily": "ipv4", - "directConnectGatewayId": "yyyy" + "directConnectGatewayId": "yyyy", } def test_create_public_vi(placeboify, maybe_sleep): client = placeboify.client("directconnect") - module = FakeModule(state="present", - id_to_associate='dxcon-aaaaaaaa', - virtual_interface_id=None, - public=True, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id="xxxx", - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="present", + id_to_associate="dxcon-aaaaaaaa", + virtual_interface_id=None, + public=True, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id="xxxx", + direct_connect_gateway_id="yyyy", + ) changed, latest_state = directconnect_virtual_interface.ensure_state(client, module) assert changed is True assert latest_state is not None @@ -193,20 +194,22 @@ def test_create_public_vi(placeboify, maybe_sleep): def test_create_private_vi(placeboify, maybe_sleep): client = placeboify.client("directconnect") - module = FakeModule(state="present", - id_to_associate='dxcon-aaaaaaaa', - virtual_interface_id=None, - public=False, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id="xxxx", - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="present", + id_to_associate="dxcon-aaaaaaaa", + virtual_interface_id=None, + public=False, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id="xxxx", + direct_connect_gateway_id="yyyy", + ) changed, latest_state = directconnect_virtual_interface.ensure_state(client, module) assert changed is True assert latest_state is not None @@ -214,20 +217,22 @@ def test_create_private_vi(placeboify, maybe_sleep): def test_delete_vi(placeboify, maybe_sleep): client = placeboify.client("directconnect") - module = FakeModule(state="absent", - id_to_associate='dxcon-aaaaaaaa', - virtual_interface_id='dxvif-aaaaaaaa', - public=False, - name="aaaaaaaa", - vlan=1, - bgp_asn=123, - authentication_key="aaaa", - customer_address="169.254.0.1/30", - amazon_address="169.254.0.2/30", - address_type="ipv4", - cidr=["10.88.0.0/30"], - virtual_gateway_id=None, - direct_connect_gateway_id="yyyy") + module = FakeModule( + state="absent", + id_to_associate="dxcon-aaaaaaaa", + virtual_interface_id="dxvif-aaaaaaaa", + public=False, + name="aaaaaaaa", + vlan=1, + bgp_asn=123, + authentication_key="aaaa", + customer_address="169.254.0.1/30", + amazon_address="169.254.0.2/30", + address_type="ipv4", + cidr=["10.88.0.0/30"], + virtual_gateway_id=None, + direct_connect_gateway_id="yyyy", + ) changed, latest_state = directconnect_virtual_interface.ensure_state(client, module) assert changed is True assert latest_state == {} diff --git a/tests/unit/plugins/modules/test_ec2_vpc_vpn.py b/tests/unit/plugins/modules/test_ec2_vpc_vpn.py index 5bdd6f25c35..6d6f7799b6d 100644 --- a/tests/unit/plugins/modules/test_ec2_vpc_vpn.py +++ b/tests/unit/plugins/modules/test_ec2_vpc_vpn.py @@ -1,15 +1,19 @@ # (c) 2017 Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import os import pytest # Magic... Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify + +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep import ansible_collections.amazon.aws.plugins.module_utils.retries as aws_retries from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info @@ -30,12 +34,12 @@ def __init__(self, **kwargs): def fail_json_aws(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise FailException('FAIL') + raise FailException("FAIL") def fail_json(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise FailException('FAIL') + raise FailException("FAIL") def exit_json(self, *args, **kwargs): self.exit_args = args @@ -44,36 +48,44 @@ def exit_json(self, *args, **kwargs): def get_vgw(connection): # see if two vgw exist and return them if so - vgw = connection.describe_vpn_gateways(Filters=[{'Name': 'tag:Ansible_VPN', 'Values': ['Test']}]) - if len(vgw['VpnGateways']) >= 2: - return [vgw['VpnGateways'][0]['VpnGatewayId'], vgw['VpnGateways'][1]['VpnGatewayId']] + vgw = connection.describe_vpn_gateways(Filters=[{"Name": "tag:Ansible_VPN", "Values": ["Test"]}]) + if len(vgw["VpnGateways"]) >= 2: + return [vgw["VpnGateways"][0]["VpnGatewayId"], vgw["VpnGateways"][1]["VpnGatewayId"]] # otherwise create two and return them - vgw_1 = connection.create_vpn_gateway(Type='ipsec.1') - vgw_2 = connection.create_vpn_gateway(Type='ipsec.1') + vgw_1 = connection.create_vpn_gateway(Type="ipsec.1") + vgw_2 = connection.create_vpn_gateway(Type="ipsec.1") for resource in (vgw_1, vgw_2): - connection.create_tags(Resources=[resource['VpnGateway']['VpnGatewayId']], Tags=[{'Key': 'Ansible_VPN', 'Value': 'Test'}]) - return [vgw_1['VpnGateway']['VpnGatewayId'], vgw_2['VpnGateway']['VpnGatewayId']] + connection.create_tags( + Resources=[resource["VpnGateway"]["VpnGatewayId"]], Tags=[{"Key": "Ansible_VPN", "Value": "Test"}] + ) + return [vgw_1["VpnGateway"]["VpnGatewayId"], vgw_2["VpnGateway"]["VpnGatewayId"]] def get_cgw(connection): # see if two cgw exist and return them if so - cgw = connection.describe_customer_gateways(DryRun=False, Filters=[{'Name': 'state', 'Values': ['available']}, - {'Name': 'tag:Name', 'Values': ['Ansible-CGW']}]) - if len(cgw['CustomerGateways']) >= 2: - return [cgw['CustomerGateways'][0]['CustomerGatewayId'], cgw['CustomerGateways'][1]['CustomerGatewayId']] + cgw = connection.describe_customer_gateways( + DryRun=False, + Filters=[{"Name": "state", "Values": ["available"]}, {"Name": "tag:Name", "Values": ["Ansible-CGW"]}], + ) + if len(cgw["CustomerGateways"]) >= 2: + return [cgw["CustomerGateways"][0]["CustomerGatewayId"], cgw["CustomerGateways"][1]["CustomerGatewayId"]] # otherwise create and return them - cgw_1 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='9.8.7.6', BgpAsn=65000) - cgw_2 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='5.4.3.2', BgpAsn=65000) + cgw_1 = connection.create_customer_gateway(DryRun=False, Type="ipsec.1", PublicIp="9.8.7.6", BgpAsn=65000) + cgw_2 = connection.create_customer_gateway(DryRun=False, Type="ipsec.1", PublicIp="5.4.3.2", BgpAsn=65000) for resource in (cgw_1, cgw_2): - connection.create_tags(Resources=[resource['CustomerGateway']['CustomerGatewayId']], Tags=[{'Key': 'Ansible-CGW', 'Value': 'Test'}]) - return [cgw_1['CustomerGateway']['CustomerGatewayId'], cgw_2['CustomerGateway']['CustomerGatewayId']] + connection.create_tags( + Resources=[resource["CustomerGateway"]["CustomerGatewayId"]], Tags=[{"Key": "Ansible-CGW", "Value": "Test"}] + ) + return [cgw_1["CustomerGateway"]["CustomerGatewayId"], cgw_2["CustomerGateway"]["CustomerGatewayId"]] def get_dependencies(): - if os.getenv('PLACEBO_RECORD'): + if os.getenv("PLACEBO_RECORD"): module = FakeModule(**{}) region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) + connection = boto3_conn( + module, conn_type="client", resource="ec2", region=region, endpoint=ec2_url, **aws_connect_kwargs + ) vgw = get_vgw(connection) cgw = get_cgw(connection) else: @@ -84,7 +96,7 @@ def get_dependencies(): def setup_mod_conn(placeboify, params): - conn = placeboify.client('ec2') + conn = placeboify.client("ec2") retry_decorator = aws_retries.AWSRetry.jittered_backoff() wrapped_conn = aws_retries.RetryingBotoClientWrapper(conn, retry_decorator) m = FakeModule(**params) @@ -96,23 +108,25 @@ def make_params(cgw, vgw, tags=None, filters=None, routes=None): filters = {} if filters is None else filters routes = [] if routes is None else routes - return {'customer_gateway_id': cgw, - 'static_only': True, - 'vpn_gateway_id': vgw, - 'connection_type': 'ipsec.1', - 'purge_tags': True, - 'tags': tags, - 'filters': filters, - 'routes': routes, - 'delay': 15, - 'wait_timeout': 600} + return { + "customer_gateway_id": cgw, + "static_only": True, + "vpn_gateway_id": vgw, + "connection_type": "ipsec.1", + "purge_tags": True, + "tags": tags, + "filters": filters, + "routes": routes, + "delay": 15, + "wait_timeout": 600, + } def make_conn(placeboify, module, connection): - customer_gateway_id = module.params['customer_gateway_id'] - static_only = module.params['static_only'] - vpn_gateway_id = module.params['vpn_gateway_id'] - connection_type = module.params['connection_type'] + customer_gateway_id = module.params["customer_gateway_id"] + static_only = module.params["static_only"] + vpn_gateway_id = module.params["vpn_gateway_id"] + connection_type = module.params["connection_type"] changed = True vpn = ec2_vpc_vpn.create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type) return changed, vpn @@ -123,7 +137,7 @@ def tear_down_conn(placeboify, connection, vpn_connection_id): def setup_req(placeboify, number_of_results=1): - ''' returns dependencies for VPN connections ''' + """returns dependencies for VPN connections""" assert number_of_results in (1, 2) results = [] cgw, vgw = get_dependencies() @@ -132,7 +146,7 @@ def setup_req(placeboify, number_of_results=1): m, conn = setup_mod_conn(placeboify, params) vpn = ec2_vpc_vpn.ensure_present(conn, params)[1] - results.append({'module': m, 'connection': conn, 'vpn': vpn, 'params': params}) + results.append({"module": m, "connection": conn, "vpn": vpn, "params": params}) if number_of_results == 1: return results[0] else: @@ -143,41 +157,44 @@ def test_find_connection_vpc_conn_id(placeboify, maybe_sleep): # setup dependencies for 2 vpn connections dependencies = setup_req(placeboify, 2) dep1, dep2 = dependencies[0], dependencies[1] - params1, vpn1, _m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection'] - _params2, vpn2, _m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection'] + params1, vpn1, _m1, conn1 = dep1["params"], dep1["vpn"], dep1["module"], dep1["connection"] + _params2, vpn2, _m2, conn2 = dep2["params"], dep2["vpn"], dep2["module"], dep2["connection"] # find the connection with a vpn_connection_id and assert it is the expected one - assert vpn1['VpnConnectionId'] == ec2_vpc_vpn.find_connection(conn1, params1, vpn1['VpnConnectionId'])['VpnConnectionId'] + assert ( + vpn1["VpnConnectionId"] + == ec2_vpc_vpn.find_connection(conn1, params1, vpn1["VpnConnectionId"])["VpnConnectionId"] + ) - tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId']) - tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId']) + tear_down_conn(placeboify, conn1, vpn1["VpnConnectionId"]) + tear_down_conn(placeboify, conn2, vpn2["VpnConnectionId"]) def test_find_connection_filters(placeboify, maybe_sleep): # setup dependencies for 2 vpn connections dependencies = setup_req(placeboify, 2) dep1, dep2 = dependencies[0], dependencies[1] - params1, vpn1, _m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection'] - params2, vpn2, _m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection'] + params1, vpn1, _m1, conn1 = dep1["params"], dep1["vpn"], dep1["module"], dep1["connection"] + params2, vpn2, _m2, conn2 = dep2["params"], dep2["vpn"], dep2["module"], dep2["connection"] # update to different tags - params1.update(tags={'Wrong': 'Tag'}) - params2.update(tags={'Correct': 'Tag'}) + params1.update(tags={"Wrong": "Tag"}) + params2.update(tags={"Correct": "Tag"}) ec2_vpc_vpn.ensure_present(conn1, params1) ec2_vpc_vpn.ensure_present(conn2, params2) # create some new parameters for a filter - params = {'filters': {'tags': {'Correct': 'Tag'}}} + params = {"filters": {"tags": {"Correct": "Tag"}}} # find the connection that has the parameters above found = ec2_vpc_vpn.find_connection(conn1, params) # assert the correct connection was found - assert found['VpnConnectionId'] == vpn2['VpnConnectionId'] + assert found["VpnConnectionId"] == vpn2["VpnConnectionId"] # delete the connections - tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId']) - tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId']) + tear_down_conn(placeboify, conn1, vpn1["VpnConnectionId"]) + tear_down_conn(placeboify, conn2, vpn2["VpnConnectionId"]) def test_find_connection_insufficient_filters(placeboify, maybe_sleep): @@ -185,15 +202,15 @@ def test_find_connection_insufficient_filters(placeboify, maybe_sleep): cgw, vgw = get_dependencies() # create two connections with the same tags - params = make_params(cgw[0], vgw[0], tags={'Correct': 'Tag'}) - params2 = make_params(cgw[1], vgw[1], tags={'Correct': 'Tag'}) + params = make_params(cgw[0], vgw[0], tags={"Correct": "Tag"}) + params2 = make_params(cgw[1], vgw[1], tags={"Correct": "Tag"}) m, conn = setup_mod_conn(placeboify, params) m2, conn2 = setup_mod_conn(placeboify, params2) vpn1 = ec2_vpc_vpn.ensure_present(conn, m.params)[1] vpn2 = ec2_vpc_vpn.ensure_present(conn2, m2.params)[1] # reset the parameters so only filtering by tags will occur - m.params = {'filters': {'tags': {'Correct': 'Tag'}}} + m.params = {"filters": {"tags": {"Correct": "Tag"}}} expected_message = "More than one matching VPN connection was found" # assert that multiple matching connections have been found @@ -201,13 +218,13 @@ def test_find_connection_insufficient_filters(placeboify, maybe_sleep): ec2_vpc_vpn.find_connection(conn, m.params) # delete the connections - tear_down_conn(placeboify, conn, vpn1['VpnConnectionId']) - tear_down_conn(placeboify, conn, vpn2['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn1["VpnConnectionId"]) + tear_down_conn(placeboify, conn, vpn2["VpnConnectionId"]) def test_find_connection_nonexistent(placeboify, maybe_sleep): # create parameters but don't create a connection with them - params = {'filters': {'tags': {'Correct': 'Tag'}}} + params = {"filters": {"tags": {"Correct": "Tag"}}} m, conn = setup_mod_conn(placeboify, params) # try to find a connection with matching parameters and assert None are found @@ -225,38 +242,48 @@ def test_create_connection(placeboify, maybe_sleep): # assert that changed is true and that there is a connection id assert changed is True - assert 'VpnConnectionId' in vpn + assert "VpnConnectionId" in vpn # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_create_connection_that_exists(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - params, vpn, _m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + params, vpn, _m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # try to recreate the same connection changed, vpn2 = ec2_vpc_vpn.ensure_present(conn, params) # nothing should have changed assert changed is False - assert vpn['VpnConnectionId'] == vpn2['VpnConnectionId'] + assert vpn["VpnConnectionId"] == vpn2["VpnConnectionId"] # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_modify_deleted_connection(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - _params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + _params, vpn, m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # delete it - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) # try to update the deleted connection - m.params.update(vpn_connection_id=vpn['VpnConnectionId']) + m.params.update(vpn_connection_id=vpn["VpnConnectionId"]) expected_message = "no VPN connection available or pending with that id" with pytest.raises(ec2_vpc_vpn.VPNConnectionException, match=expected_message): ec2_vpc_vpn.ensure_present(conn, m.params) @@ -265,7 +292,12 @@ def test_modify_deleted_connection(placeboify, maybe_sleep): def test_delete_connection(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - _params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + _params, vpn, m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # delete it changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params) @@ -276,7 +308,7 @@ def test_delete_connection(placeboify, maybe_sleep): def test_delete_nonexistent_connection(placeboify, maybe_sleep): # create parameters and ensure any connection matching (None) is deleted - params = {'filters': {'tags': {'ThisConnection': 'DoesntExist'}}, 'delay': 15, 'wait_timeout': 600} + params = {"filters": {"tags": {"ThisConnection": "DoesntExist"}}, "delay": 15, "wait_timeout": 600} m, conn = setup_mod_conn(placeboify, params) changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params) @@ -287,83 +319,114 @@ def test_delete_nonexistent_connection(placeboify, maybe_sleep): def test_check_for_update_tags(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - _params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + _params, vpn, m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # add and remove a number of tags - m.params['tags'] = {'One': 'one', 'Two': 'two'} + m.params["tags"] = {"One": "one", "Two": "two"} ec2_vpc_vpn.ensure_present(conn, m.params) - m.params['tags'] = {'Two': 'two', 'Three': 'three', 'Four': 'four'} - changes = ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId']) + m.params["tags"] = {"Two": "two", "Three": "three", "Four": "four"} + changes = ec2_vpc_vpn.check_for_update(conn, m.params, vpn["VpnConnectionId"]) - flat_dict_changes = boto3_tag_list_to_ansible_dict(changes['tags_to_add']) - correct_changes = boto3_tag_list_to_ansible_dict([{'Key': 'Three', 'Value': 'three'}, {'Key': 'Four', 'Value': 'four'}]) + flat_dict_changes = boto3_tag_list_to_ansible_dict(changes["tags_to_add"]) + correct_changes = boto3_tag_list_to_ansible_dict( + [{"Key": "Three", "Value": "three"}, {"Key": "Four", "Value": "four"}] + ) assert flat_dict_changes == correct_changes - assert changes['tags_to_remove'] == ['One'] + assert changes["tags_to_remove"] == ["One"] # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_check_for_update_nonmodifiable_attr(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] - current_vgw = params['vpn_gateway_id'] + params, vpn, m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) + current_vgw = params["vpn_gateway_id"] # update a parameter that isn't modifiable m.params.update(vpn_gateway_id="invalidchange") - expected_message = 'You cannot modify vpn_gateway_id, the current value of which is {0}. Modifiable VPN connection attributes are'.format(current_vgw) + expected_message = "You cannot modify vpn_gateway_id, the current value of which is {0}. Modifiable VPN connection attributes are".format( + current_vgw + ) with pytest.raises(ec2_vpc_vpn.VPNConnectionException, match=expected_message): - ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId']) + ec2_vpc_vpn.check_for_update(conn, m.params, vpn["VpnConnectionId"]) # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_add_tags(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - params, vpn, _m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + params, vpn, _m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # add a tag to the connection - ec2_vpc_vpn.add_tags(conn, vpn['VpnConnectionId'], add=[{'Key': 'Ansible-Test', 'Value': 'VPN'}]) + ec2_vpc_vpn.add_tags(conn, vpn["VpnConnectionId"], add=[{"Key": "Ansible-Test", "Value": "VPN"}]) # assert tag is there current_vpn = ec2_vpc_vpn.find_connection(conn, params) - assert current_vpn['Tags'] == [{'Key': 'Ansible-Test', 'Value': 'VPN'}] + assert current_vpn["Tags"] == [{"Key": "Ansible-Test", "Value": "VPN"}] # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_remove_tags(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - params, vpn, _m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + params, vpn, _m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # remove a tag from the connection - ec2_vpc_vpn.remove_tags(conn, vpn['VpnConnectionId'], remove=['Ansible-Test']) + ec2_vpc_vpn.remove_tags(conn, vpn["VpnConnectionId"], remove=["Ansible-Test"]) # assert the tag is gone current_vpn = ec2_vpc_vpn.find_connection(conn, params) - assert 'Tags' not in current_vpn + assert "Tags" not in current_vpn # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) def test_add_routes(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) - params, vpn, _m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] + params, vpn, _m, conn = ( + dependencies["params"], + dependencies["vpn"], + dependencies["module"], + dependencies["connection"], + ) # create connection with a route - ec2_vpc_vpn.add_routes(conn, vpn['VpnConnectionId'], ['195.168.2.0/24', '196.168.2.0/24']) + ec2_vpc_vpn.add_routes(conn, vpn["VpnConnectionId"], ["195.168.2.0/24", "196.168.2.0/24"]) # assert both routes are there current_vpn = ec2_vpc_vpn.find_connection(conn, params) - assert set(each['DestinationCidrBlock'] for each in current_vpn['Routes']) == set(['195.168.2.0/24', '196.168.2.0/24']) + assert set(each["DestinationCidrBlock"] for each in current_vpn["Routes"]) == set( + ["195.168.2.0/24", "196.168.2.0/24"] + ) # delete connection - tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) + tear_down_conn(placeboify, conn, vpn["VpnConnectionId"]) diff --git a/tests/unit/plugins/modules/test_ec2_win_password.py b/tests/unit/plugins/modules/test_ec2_win_password.py index 93962012010..c4285f02ab2 100644 --- a/tests/unit/plugins/modules/test_ec2_win_password.py +++ b/tests/unit/plugins/modules/test_ec2_win_password.py @@ -1,8 +1,8 @@ -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function __metaclass__ = type -''' +""" Commands to encrypt a message that can be decrypted: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.serialization import load_pem_private_key @@ -15,7 +15,7 @@ load_pem_public_key(rsa_public_key_pem = , default_backend()) base64_cipher = public_key.encrypt('Ansible_AWS_EC2_Win_Password', PKCS1v15()) string_cipher = base64.b64encode(base64_cipher) -''' +""" import datetime import pytest @@ -33,44 +33,47 @@ from ansible_collections.community.aws.plugins.modules.ec2_win_password import setup_module_object from ansible_collections.community.aws.plugins.modules.ec2_win_password import ec2_win_password -fixture_prefix = 'tests/unit/plugins/modules/fixtures/certs' +fixture_prefix = "tests/unit/plugins/modules/fixtures/certs" if not HAS_BOTO3: pytestmark = pytest.mark.skip("test_api_gateway.py requires the `boto3` and `botocore` modules") class TestEc2WinPasswordModule(ModuleTestCase): - # Future: It would be good to generate this data on the fly and use a # temporary certificate and password. - PEM_PATH = fixture_prefix + '/ec2_win_password.pem' - UNENCRYPTED_DATA = 'Ansible_AWS_EC2_Win_Password' - ENCRYPTED_DATA = 'L2k1iFiu/TRrjGr6Rwco/T3C7xkWxUw4+YPYpGGOmP3KDdy3hT1' \ - '8RvdDJ2i0e+y7wUcH43DwbRYSlkSyALY/nzjSV9R5NChUyVs3W5' \ - '5oiVuyTKsk0lor8dFJ9z9unq14tScZHvyQ3Nx1ggOtS18S9Pk55q' \ - 'IaCXfx26ucH76VRho=' - INSTANCE_ID = 'i-12345' - - @patch('ansible_collections.community.aws.plugins.modules.s3_bucket_notification.AnsibleAWSModule.client') + PEM_PATH = fixture_prefix + "/ec2_win_password.pem" + UNENCRYPTED_DATA = "Ansible_AWS_EC2_Win_Password" + ENCRYPTED_DATA = ( + "L2k1iFiu/TRrjGr6Rwco/T3C7xkWxUw4+YPYpGGOmP3KDdy3hT1" + "8RvdDJ2i0e+y7wUcH43DwbRYSlkSyALY/nzjSV9R5NChUyVs3W5" + "5oiVuyTKsk0lor8dFJ9z9unq14tScZHvyQ3Nx1ggOtS18S9Pk55q" + "IaCXfx26ucH76VRho=" + ) + INSTANCE_ID = "i-12345" + + @patch("ansible_collections.community.aws.plugins.modules.s3_bucket_notification.AnsibleAWSModule.client") def test_decryption(self, mock_client): - path = self.PEM_PATH - with open(path, 'r') as f: + with open(path, "r") as f: pem = to_text(f.read()) with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({'instance_id': self.INSTANCE_ID, - 'key_data': pem, - }) + set_module_args( + { + "instance_id": self.INSTANCE_ID, + "key_data": pem, + } + ) module = setup_module_object() mock_client().get_password_data.return_value = { - 'InstanceId': self.INSTANCE_ID, - 'PasswordData': self.ENCRYPTED_DATA, - 'Timestamp': datetime.datetime.now(), + "InstanceId": self.INSTANCE_ID, + "PasswordData": self.ENCRYPTED_DATA, + "Timestamp": datetime.datetime.now(), } ec2_win_password(module) self.assertEqual( - exec_info.exception.args[0]['win_password'], + exec_info.exception.args[0]["win_password"], to_bytes(self.UNENCRYPTED_DATA), ) diff --git a/tests/unit/plugins/modules/test_iam_password_policy.py b/tests/unit/plugins/modules/test_iam_password_policy.py index 11de7f47782..49f6f3ec7d2 100644 --- a/tests/unit/plugins/modules/test_iam_password_policy.py +++ b/tests/unit/plugins/modules/test_iam_password_policy.py @@ -1,5 +1,6 @@ # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import json @@ -11,20 +12,22 @@ def test_warn_if_state_not_specified(capsys): - set_module_args({ - "min_pw_length": "8", - "require_symbols": "false", - "require_numbers": "true", - "require_uppercase": "true", - "require_lowercase": "true", - "allow_pw_change": "true", - "pw_max_age": "60", - "pw_reuse_prevent": "5", - "pw_expire": "false" - }) + set_module_args( + { + "min_pw_length": "8", + "require_symbols": "false", + "require_numbers": "true", + "require_uppercase": "true", + "require_lowercase": "true", + "allow_pw_change": "true", + "pw_max_age": "60", + "pw_reuse_prevent": "5", + "pw_expire": "false", + } + ) with pytest.raises(SystemExit): iam_password_policy.main() captured = capsys.readouterr() output = json.loads(captured.out) - assert 'missing required arguments' in output.get('msg', '') + assert "missing required arguments" in output.get("msg", "") diff --git a/tests/unit/plugins/modules/test_opensearch.py b/tests/unit/plugins/modules/test_opensearch.py index 836e2cf0788..06b50b5ca57 100644 --- a/tests/unit/plugins/modules/test_opensearch.py +++ b/tests/unit/plugins/modules/test_opensearch.py @@ -1,7 +1,8 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import functools @@ -13,74 +14,71 @@ def test_parse_version(): test_versions = [ - ['Elasticsearch_5.5', {'engine_type': 'Elasticsearch', 'major': 5, 'minor': 5}], - ['Elasticsearch_7.1', {'engine_type': 'Elasticsearch', 'major': 7, 'minor': 1}], - ['Elasticsearch_7.10', {'engine_type': 'Elasticsearch', 'major': 7, 'minor': 10}], - ['OpenSearch_1.0', {'engine_type': 'OpenSearch', 'major': 1, 'minor': 0}], - ['OpenSearch_1.1', {'engine_type': 'OpenSearch', 'major': 1, 'minor': 1}], - ['OpenSearch_a.b', None], - ['OpenSearch_1.b', None], - ['OpenSearch_1-1', None], - ['OpenSearch_1.1.2', None], - ['OpenSearch_foo_1.1', None], - ['OpenSearch_1', None], - ['OpenSearch-1.0', None], - ['Foo_1.0', None], + ["Elasticsearch_5.5", {"engine_type": "Elasticsearch", "major": 5, "minor": 5}], + ["Elasticsearch_7.1", {"engine_type": "Elasticsearch", "major": 7, "minor": 1}], + ["Elasticsearch_7.10", {"engine_type": "Elasticsearch", "major": 7, "minor": 10}], + ["OpenSearch_1.0", {"engine_type": "OpenSearch", "major": 1, "minor": 0}], + ["OpenSearch_1.1", {"engine_type": "OpenSearch", "major": 1, "minor": 1}], + ["OpenSearch_a.b", None], + ["OpenSearch_1.b", None], + ["OpenSearch_1-1", None], + ["OpenSearch_1.1.2", None], + ["OpenSearch_foo_1.1", None], + ["OpenSearch_1", None], + ["OpenSearch-1.0", None], + ["Foo_1.0", None], ] for expected in test_versions: ret = parse_version(expected[0]) if ret != expected[1]: - raise AssertionError( - f"parse_version({expected[0]} returned {ret}, expected {expected[1]}") + raise AssertionError(f"parse_version({expected[0]} returned {ret}, expected {expected[1]}") def test_version_compare(): test_versions = [ - ['Elasticsearch_5.5', 'Elasticsearch_5.5', 0], - ['Elasticsearch_5.5', 'Elasticsearch_7.1', -1], - ['Elasticsearch_7.1', 'Elasticsearch_7.1', 0], - ['Elasticsearch_7.1', 'Elasticsearch_7.2', -1], - ['Elasticsearch_7.1', 'Elasticsearch_7.10', -1], - ['Elasticsearch_7.2', 'Elasticsearch_7.10', -1], - ['Elasticsearch_7.10', 'Elasticsearch_7.2', 1], - ['Elasticsearch_7.2', 'Elasticsearch_5.5', 1], - ['Elasticsearch_7.2', 'OpenSearch_1.0', -1], - ['Elasticsearch_7.2', 'OpenSearch_1.1', -1], - ['OpenSearch_1.1', 'OpenSearch_1.1', 0], - ['OpenSearch_1.0', 'OpenSearch_1.1', -1], - ['OpenSearch_1.1', 'OpenSearch_1.0', 1], - ['foo_1.1', 'OpenSearch_1.0', -1], - ['Elasticsearch_5.5', 'foo_1.0', 1], + ["Elasticsearch_5.5", "Elasticsearch_5.5", 0], + ["Elasticsearch_5.5", "Elasticsearch_7.1", -1], + ["Elasticsearch_7.1", "Elasticsearch_7.1", 0], + ["Elasticsearch_7.1", "Elasticsearch_7.2", -1], + ["Elasticsearch_7.1", "Elasticsearch_7.10", -1], + ["Elasticsearch_7.2", "Elasticsearch_7.10", -1], + ["Elasticsearch_7.10", "Elasticsearch_7.2", 1], + ["Elasticsearch_7.2", "Elasticsearch_5.5", 1], + ["Elasticsearch_7.2", "OpenSearch_1.0", -1], + ["Elasticsearch_7.2", "OpenSearch_1.1", -1], + ["OpenSearch_1.1", "OpenSearch_1.1", 0], + ["OpenSearch_1.0", "OpenSearch_1.1", -1], + ["OpenSearch_1.1", "OpenSearch_1.0", 1], + ["foo_1.1", "OpenSearch_1.0", -1], + ["Elasticsearch_5.5", "foo_1.0", 1], ] for v in test_versions: ret = compare_domain_versions(v[0], v[1]) if ret != v[2]: - raise AssertionError( - f"compare({v[0]}, {v[1]} returned {ret}, expected {v[2]}") + raise AssertionError(f"compare({v[0]}, {v[1]} returned {ret}, expected {v[2]}") def test_sort_versions(): input_versions = [ - 'Elasticsearch_5.6', - 'Elasticsearch_5.5', - 'Elasticsearch_7.10', - 'Elasticsearch_7.2', - 'foo_10.5', - 'OpenSearch_1.1', - 'OpenSearch_1.0', - 'Elasticsearch_7.3', + "Elasticsearch_5.6", + "Elasticsearch_5.5", + "Elasticsearch_7.10", + "Elasticsearch_7.2", + "foo_10.5", + "OpenSearch_1.1", + "OpenSearch_1.0", + "Elasticsearch_7.3", ] expected_versions = [ - 'foo_10.5', - 'Elasticsearch_5.5', - 'Elasticsearch_5.6', - 'Elasticsearch_7.2', - 'Elasticsearch_7.3', - 'Elasticsearch_7.10', - 'OpenSearch_1.0', - 'OpenSearch_1.1', + "foo_10.5", + "Elasticsearch_5.5", + "Elasticsearch_5.6", + "Elasticsearch_7.2", + "Elasticsearch_7.3", + "Elasticsearch_7.10", + "OpenSearch_1.0", + "OpenSearch_1.1", ] input_versions = sorted(input_versions, key=functools.cmp_to_key(compare_domain_versions)) if input_versions != expected_versions: - raise AssertionError( - f"Expected {expected_versions}, got {input_versions}") + raise AssertionError(f"Expected {expected_versions}, got {input_versions}") diff --git a/tests/unit/plugins/modules/test_redshift_cross_region_snapshots.py b/tests/unit/plugins/modules/test_redshift_cross_region_snapshots.py index 7b22d5b00b3..661c58e0698 100644 --- a/tests/unit/plugins/modules/test_redshift_cross_region_snapshots.py +++ b/tests/unit/plugins/modules/test_redshift_cross_region_snapshots.py @@ -1,40 +1,39 @@ # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from ansible_collections.community.aws.plugins.modules import redshift_cross_region_snapshots as rcrs mock_status_enabled = { - 'SnapshotCopyGrantName': 'snapshot-us-east-1-to-us-west-2', - 'DestinationRegion': 'us-west-2', - 'RetentionPeriod': 1, + "SnapshotCopyGrantName": "snapshot-us-east-1-to-us-west-2", + "DestinationRegion": "us-west-2", + "RetentionPeriod": 1, } mock_status_disabled = {} mock_request_illegal = { - 'snapshot_copy_grant': 'changed', - 'destination_region': 'us-west-2', - 'snapshot_retention_period': 1 + "snapshot_copy_grant": "changed", + "destination_region": "us-west-2", + "snapshot_retention_period": 1, } mock_request_update = { - 'snapshot_copy_grant': 'snapshot-us-east-1-to-us-west-2', - 'destination_region': 'us-west-2', - 'snapshot_retention_period': 3 + "snapshot_copy_grant": "snapshot-us-east-1-to-us-west-2", + "destination_region": "us-west-2", + "snapshot_retention_period": 3, } mock_request_no_update = { - 'snapshot_copy_grant': 'snapshot-us-east-1-to-us-west-2', - 'destination_region': 'us-west-2', - 'snapshot_retention_period': 1 + "snapshot_copy_grant": "snapshot-us-east-1-to-us-west-2", + "destination_region": "us-west-2", + "snapshot_retention_period": 1, } def test_fail_at_unsupported_operations(): - response = rcrs.requesting_unsupported_modifications( - mock_status_enabled, mock_request_illegal - ) + response = rcrs.requesting_unsupported_modifications(mock_status_enabled, mock_request_illegal) assert response is True @@ -44,9 +43,7 @@ def test_needs_update_true(): def test_no_change(): - response = rcrs.requesting_unsupported_modifications( - mock_status_enabled, mock_request_no_update - ) + response = rcrs.requesting_unsupported_modifications(mock_status_enabled, mock_request_no_update) needs_update_response = rcrs.needs_update(mock_status_enabled, mock_request_no_update) assert response is False assert needs_update_response is False diff --git a/tests/unit/plugins/modules/test_ssm_inventory_info.py b/tests/unit/plugins/modules/test_ssm_inventory_info.py index 6c8559ae77b..0437adc1f8d 100644 --- a/tests/unit/plugins/modules/test_ssm_inventory_info.py +++ b/tests/unit/plugins/modules/test_ssm_inventory_info.py @@ -21,9 +21,7 @@ def test_get_ssm_inventory(): filters = MagicMock() assert get_ssm_inventory(connection, filters) == inventory_response - connection.get_inventory.assert_called_once_with( - Filters=filters - ) + connection.get_inventory.assert_called_once_with(Filters=filters) def test_get_ssm_inventory_failure(): @@ -35,109 +33,83 @@ def test_get_ssm_inventory_failure(): get_ssm_inventory(connection, filters) -@patch('ansible_collections.community.aws.plugins.modules.ssm_inventory_info.get_ssm_inventory') +@patch("ansible_collections.community.aws.plugins.modules.ssm_inventory_info.get_ssm_inventory") def test_execute_module(m_get_ssm_inventory): - instance_id = "i-0202020202020202" aws_inventory = { - 'AgentType': 'amazon-ssm-agent', - 'AgentVersion': '3.2.582.0', - 'ComputerName': 'ip-172-31-44-166.ec2.internal', - 'InstanceId': 'i-039eb9b1f55934ab6', - 'InstanceStatus': 'Active', - 'IpAddress': '172.31.44.166', - 'PlatformName': 'Fedora Linux', - 'PlatformType': 'Linux', - 'PlatformVersion': '37', - 'ResourceType': 'EC2Instance' + "AgentType": "amazon-ssm-agent", + "AgentVersion": "3.2.582.0", + "ComputerName": "ip-172-31-44-166.ec2.internal", + "InstanceId": "i-039eb9b1f55934ab6", + "InstanceStatus": "Active", + "IpAddress": "172.31.44.166", + "PlatformName": "Fedora Linux", + "PlatformType": "Linux", + "PlatformVersion": "37", + "ResourceType": "EC2Instance", } ansible_inventory = { - 'agent_type': 'amazon-ssm-agent', - 'agent_version': '3.2.582.0', - 'computer_name': 'ip-172-31-44-166.ec2.internal', - 'instance_id': 'i-039eb9b1f55934ab6', - 'instance_status': 'Active', - 'ip_address': '172.31.44.166', - 'platform_name': 'Fedora Linux', - 'platform_type': 'Linux', - 'platform_version': '37', - 'resource_type': 'EC2Instance' + "agent_type": "amazon-ssm-agent", + "agent_version": "3.2.582.0", + "computer_name": "ip-172-31-44-166.ec2.internal", + "instance_id": "i-039eb9b1f55934ab6", + "instance_status": "Active", + "ip_address": "172.31.44.166", + "platform_name": "Fedora Linux", + "platform_type": "Linux", + "platform_version": "37", + "resource_type": "EC2Instance", } m_get_ssm_inventory.return_value = { - "Entities": [ - { - 'Id': instance_id, - "Data": { - "AWS:InstanceInformation": {"Content": [aws_inventory]} - } - } - ], - "Status": 200 + "Entities": [{"Id": instance_id, "Data": {"AWS:InstanceInformation": {"Content": [aws_inventory]}}}], + "Status": 200, } connection = MagicMock() module = MagicMock() - module.params = dict( - instance_id=instance_id - ) + module.params = dict(instance_id=instance_id) module.exit_json.side_effect = SystemExit(1) module.fail_json_aws.side_effect = SystemError(2) with pytest.raises(SystemExit): execute_module(module, connection) - module.exit_json.assert_called_once_with( - changed=False, ssm_inventory=ansible_inventory - ) + module.exit_json.assert_called_once_with(changed=False, ssm_inventory=ansible_inventory) -@patch('ansible_collections.community.aws.plugins.modules.ssm_inventory_info.get_ssm_inventory') +@patch("ansible_collections.community.aws.plugins.modules.ssm_inventory_info.get_ssm_inventory") def test_execute_module_no_data(m_get_ssm_inventory): - instance_id = "i-0202020202020202" m_get_ssm_inventory.return_value = { - "Entities": [ - { - 'Id': instance_id, - "Data": {} - } - ], + "Entities": [{"Id": instance_id, "Data": {}}], } connection = MagicMock() module = MagicMock() - module.params = dict( - instance_id=instance_id - ) + module.params = dict(instance_id=instance_id) module.exit_json.side_effect = SystemExit(1) module.fail_json_aws.side_effect = SystemError(2) with pytest.raises(SystemExit): execute_module(module, connection) - module.exit_json.assert_called_once_with( - changed=False, ssm_inventory={} - ) + module.exit_json.assert_called_once_with(changed=False, ssm_inventory={}) -@patch('ansible_collections.community.aws.plugins.modules.ssm_inventory_info.get_ssm_inventory') +@patch("ansible_collections.community.aws.plugins.modules.ssm_inventory_info.get_ssm_inventory") def test_execute_module_failure(m_get_ssm_inventory): - instance_id = "i-0202020202020202" m_get_ssm_inventory.side_effect = SsmInventoryInfoFailure( - exc=BotoCoreError(error="failed", operation="get_ssm_inventory"), - msg="get_ssm_inventory() failed." + exc=BotoCoreError(error="failed", operation="get_ssm_inventory"), msg="get_ssm_inventory() failed." ) connection = MagicMock() module = MagicMock() - module.params = dict( - instance_id=instance_id - ) + module.params = dict(instance_id=instance_id) module.exit_json.side_effect = SystemExit(1) module.fail_json_aws.side_effect = SystemError(2) diff --git a/tests/unit/plugins/modules/utils.py b/tests/unit/plugins/modules/utils.py index 026bf2549d2..0ba215f2410 100644 --- a/tests/unit/plugins/modules/utils.py +++ b/tests/unit/plugins/modules/utils.py @@ -1,6 +1,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import json @@ -12,12 +13,12 @@ def set_module_args(args): - if '_ansible_remote_tmp' not in args: - args['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in args: - args['_ansible_keep_remote_files'] = False + if "_ansible_remote_tmp" not in args: + args["_ansible_remote_tmp"] = "/tmp" + if "_ansible_keep_remote_files" not in args: + args["_ansible_keep_remote_files"] = False - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) basic._ANSIBLE_ARGS = to_bytes(args) @@ -30,22 +31,21 @@ class AnsibleFailJson(Exception): def exit_json(*args, **kwargs): - if 'changed' not in kwargs: - kwargs['changed'] = False + if "changed" not in kwargs: + kwargs["changed"] = False raise AnsibleExitJson(kwargs) def fail_json(*args, **kwargs): - kwargs['failed'] = True + kwargs["failed"] = True raise AnsibleFailJson(kwargs) class ModuleTestCase(unittest.TestCase): - def setUp(self): self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) self.mock_module.start() - self.mock_sleep = patch('time.sleep') + self.mock_sleep = patch("time.sleep") self.mock_sleep.start() set_module_args({}) self.addCleanup(self.mock_module.stop)