From ca99319a65178b0fad48bba0f3b1caf2416df753 Mon Sep 17 00:00:00 2001
From: Himani Anil Deshpande <79726937+himani2411@users.noreply.github.com>
Date: Fri, 16 Dec 2022 14:59:20 -0500
Subject: [PATCH] [3.x] Add Integration tests for Resource Prefix (#4652)

* Add Integration tests for Resource Prefix

Add integration test cases and a config file for the IAM ResourcePrefix in test_iam.py.
Add user-role-rp.cfn.yaml to provide a user role for creating test clusters.
Add the test case to the pcluster3.yaml config for Jenkins tests.

* Remove file deleteS3.py

* Address PR review comments

Update the pcluster.config.yaml file to remove the Iam section and use _inject_resource_in_config() to inject the Iam and ResourcePrefix sections.
Change the scope of initialize and register_prefix_cli_credentials from class to the default (function) level.
Update _test_iam_resource_in_cluster() to also verify cluster creation.
Update test_iam_resource_prefix so that user-role-rp is not duplicated for each value of iam_resource_prefix_list, improving performance.
Move user-role-iam-resource-prefix.cfn.yaml into the tests folder directory.
Remove the update_config variable from test_iam_resource_prefix.
PR Link: https://github.com/aws/aws-parallelcluster/pull/4652

* Change Iam Resource Prefix tests to handle one test case

Add Iam/ResourcePrefix to pcluster.config.yaml.
Change user-role-iam-resource-prefix.cfn.yaml to handle the /path-prefix/name-prefix- IAM resource prefix.
Make /path-prefix/name-prefix the only pytest parameter and remove the use_default_iam_credentials parameter check from initialize_resource_prefix_cli_creds.

* Add test-specific CLI credentials in cluster creation

Use test-specific CLI credentials when creating the cluster and running pcluster commands.
Change ClusterFactory's create_cluster() to accept test-specific CLI credentials as arguments.
Change user-role-iam-resource-prefix.cfn.yaml to handle the /path-prefix/name-prefix- IAM resource prefix.
Remove register_resource_prefix_cli_credentials because it would affect parallel tests running in the same region.

* Remove unnecessary methods and keyword arguments

Remove _inject_resource_in_config(), which is unused in this PR (it is part of another PR).
Revert run_command() to its original definition.
Update run_pcluster_command() to handle credential switching and a KeyError for credential_arn (a standalone sketch of this switching logic follows the diff).
Remove unnecessary keyword arguments passed from create_cluster() to run_pcluster_command().
Reference PR: https://github.com/aws/aws-parallelcluster/pull/4652

* Remove custom_cli_credentials from ClusterFactory

Remove custom_cli_credentials as a ClusterFactory class member.
Reference PR: https://github.com/aws/aws-parallelcluster/pull/4652 * Revert change in run_pcluster_command Revert the changes in indentation and scope of if clause in run_pcluster_command() Reference PR: https://github.com/aws/aws-parallelcluster/pull/4652 Co-authored-by: Himani Deshpande --- tests/integration-tests/clusters_factory.py | 48 +- .../configs/common/common.yaml | 6 + .../integration-tests/configs/pcluster3.yaml | 6 + tests/integration-tests/conftest.py | 1 + .../framework/credential_providers.py | 6 +- tests/integration-tests/tests/iam/test_iam.py | 132 ++- .../pcluster.config.yaml | 41 + .../user-role-iam-resource-prefix.cfn.yaml | 785 ++++++++++++++++++ tests/integration-tests/utils.py | 10 +- 9 files changed, 1014 insertions(+), 21 deletions(-) create mode 100644 tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml create mode 100644 tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml diff --git a/tests/integration-tests/clusters_factory.py b/tests/integration-tests/clusters_factory.py index 16c57930d4..0b0c8832fc 100644 --- a/tests/integration-tests/clusters_factory.py +++ b/tests/integration-tests/clusters_factory.py @@ -44,7 +44,7 @@ def wrapper(*args, **kwargs): class Cluster: """Contain all static and dynamic data related to a cluster instance.""" - def __init__(self, name, ssh_key, config_file, region): + def __init__(self, name, ssh_key, config_file, region, custom_cli_credentials=None): self.name = name self.config_file = config_file self.ssh_key = ssh_key @@ -57,6 +57,7 @@ def __init__(self, name, ssh_key, config_file, region): self.__cfn_outputs = None self.__cfn_resources = None self.__cfn_stack_arn = None + self.custom_cli_credentials = custom_cli_credentials def __repr__(self): attrs = ", ".join(["{key}={value}".format(key=key, value=repr(value)) for key, value in self.__dict__.items()]) @@ -89,7 +90,12 @@ def update(self, config_file, raise_on_error=True, log_error=True, **kwargs): # TODO Remove the validator suppression below once the plugin scheduler is officially supported if self.config["Scheduling"]["Scheduler"] == "plugin": command.extend(["--suppress-validators", "type:SchedulerValidator"]) - result = run_pcluster_command(command, raise_on_error=raise_on_error, log_error=log_error) + result = run_pcluster_command( + command, + raise_on_error=raise_on_error, + log_error=log_error, + custom_cli_credentials=self.custom_cli_credentials, + ) logging.info("update-cluster response: %s", result.stdout) response = json.loads(result.stdout) if response.get("cloudFormationStackStatus") != "UPDATE_COMPLETE": @@ -130,7 +136,7 @@ def delete(self, delete_logs=False): logging.warning("CloudWatch logs for cluster %s are preserved due to failure.", self.name) try: self.cfn_stack_arn # Cache cfn_stack_arn attribute before stack deletion - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) if "DELETE_FAILED" in result.stdout: error = "Cluster deletion failed for {0} with output: {1}".format(self.name, result.stdout) logging.error(error) @@ -153,7 +159,7 @@ def start(self): else: # slurm and scheduler plugin case cmd_args.append("START_REQUESTED") try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) logging.info("Cluster {0} started 
successfully".format(self.name)) return result.stdout except subprocess.CalledProcessError as e: @@ -169,7 +175,7 @@ def stop(self): else: # slurm and scheduler plugin case cmd_args.append("STOP_REQUESTED") try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) logging.info("Cluster {0} stopped successfully".format(self.name)) return result.stdout except subprocess.CalledProcessError as e: @@ -180,7 +186,7 @@ def describe_cluster(self): """Run pcluster describe-cluster and return the result.""" cmd_args = ["pcluster", "describe-cluster", "--cluster-name", self.name] try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Get cluster {0} status successfully".format(self.name)) return response @@ -192,7 +198,7 @@ def describe_compute_fleet(self): """Run pcluster describe-compute-fleet and return the result.""" cmd_args = ["pcluster", "describe-compute-fleet", "--cluster-name", self.name] try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Describe cluster %s compute fleet successfully", self.name) return response @@ -216,7 +222,7 @@ def describe_cluster_instances(self, node_type=None, queue_name=None): if queue_name: cmd_args.extend(["--queue-name", queue_name]) try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Get cluster {0} instances successfully".format(self.name)) return response["instances"] @@ -239,7 +245,7 @@ def export_logs(self, bucket, output_file=None, bucket_prefix=None, filters=None if filters: cmd_args += ["--filters", filters] try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Cluster's logs exported successfully") return response @@ -253,7 +259,7 @@ def list_log_streams(self, next_token=None): if next_token: cmd_args.extend(["--next-token", next_token]) try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Cluster's logs listed successfully") return response @@ -281,7 +287,7 @@ def get_log_events(self, log_stream, **args): cmd_args.extend([f"--{kebab_case(k)}", str(val)]) try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Log events retrieved successfully") return response @@ -296,7 +302,7 @@ def get_stack_events(self, **args): cmd_args.extend([f"--{kebab_case(k)}", str(val)]) try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Stack events retrieved successfully") 
return response @@ -419,8 +425,13 @@ def create_cluster(self, cluster, log_error=True, raise_on_error=True, **kwargs) logging.info("Creating cluster {0} with config {1}".format(name, cluster.config_file)) command, wait = self._build_command(cluster, kwargs) try: - result = run_pcluster_command(command, timeout=7200, raise_on_error=raise_on_error, log_error=log_error) - + result = run_pcluster_command( + command, + timeout=7200, + raise_on_error=raise_on_error, + log_error=log_error, + custom_cli_credentials=kwargs.get("custom_cli_credentials"), + ) logging.info("create-cluster response: %s", result.stdout) response = json.loads(result.stdout) if wait: @@ -470,10 +481,11 @@ def _build_command(cluster, kwargs): kwargs["suppress_validators"] = validators_list for k, val in kwargs.items(): - if isinstance(val, (list, tuple)): - command.extend([f"--{kebab_case(k)}"] + list(map(str, val))) - else: - command.extend([f"--{kebab_case(k)}", str(val)]) + if k != "custom_cli_credentials": + if isinstance(val, (list, tuple)): + command.extend([f"--{kebab_case(k)}"] + list(map(str, val))) + else: + command.extend([f"--{kebab_case(k)}", str(val)]) return command, wait diff --git a/tests/integration-tests/configs/common/common.yaml b/tests/integration-tests/configs/common/common.yaml index 9e05cefe6a..1fbb92a8f7 100644 --- a/tests/integration-tests/configs/common/common.yaml +++ b/tests/integration-tests/configs/common/common.yaml @@ -294,6 +294,12 @@ iam: instances: {{ common.INSTANCES_DEFAULT_X86 }} oss: ["alinux2"] schedulers: ["slurm", "awsbatch"] + test_iam.py::test_iam_resource_prefix: + dimensions: + - regions: [ "eu-north-1" ] + instances: {{ common.INSTANCES_DEFAULT_X86 }} + oss: [ "alinux2" ] + schedulers: [ "slurm" ] intel_hpc: test_intel_hpc.py::test_intel_hpc: dimensions: diff --git a/tests/integration-tests/configs/pcluster3.yaml b/tests/integration-tests/configs/pcluster3.yaml index f39050eac8..c780931cb1 100644 --- a/tests/integration-tests/configs/pcluster3.yaml +++ b/tests/integration-tests/configs/pcluster3.yaml @@ -50,6 +50,12 @@ test-suites: instances: {{ common.INSTANCES_DEFAULT_X86 }} oss: ["alinux2"] schedulers: ["slurm"] + test_iam.py::test_iam_resource_prefix: + dimensions: + - regions: [ "eu-north-1" ] + schedulers: [ "slurm" ] + oss: [ "alinux2" ] + instances: {{ common.INSTANCES_DEFAULT_X86 }} schedulers: test_awsbatch.py::test_awsbatch: dimensions: diff --git a/tests/integration-tests/conftest.py b/tests/integration-tests/conftest.py index b1e8cc21fb..8ae06d9d9d 100644 --- a/tests/integration-tests/conftest.py +++ b/tests/integration-tests/conftest.py @@ -386,6 +386,7 @@ def _cluster_factory(cluster_config, upper_case_cluster_name=False, **kwargs): config_file=cluster_config, ssh_key=request.config.getoption("key_path"), region=region, + custom_cli_credentials=kwargs.get("custom_cli_credentials"), ) if not request.config.getoption("cluster"): cluster.creation_response = factory.create_cluster(cluster, **kwargs) diff --git a/tests/integration-tests/framework/credential_providers.py b/tests/integration-tests/framework/credential_providers.py index c281678b1f..d450b08f55 100644 --- a/tests/integration-tests/framework/credential_providers.py +++ b/tests/integration-tests/framework/credential_providers.py @@ -25,12 +25,16 @@ def register_cli_credentials_for_region(region, iam_role): def run_pcluster_command(*args, **kwargs): """Run a command after assuming the role configured through register_cli_credentials_for_region.""" + region = kwargs.get("region") if not region: region = 
os.environ["AWS_DEFAULT_REGION"] if region in cli_credentials: - with sts_credential_provider(region, cli_credentials[region]): + with sts_credential_provider( + region, credential_arn=kwargs.get("custom_cli_credentials") or cli_credentials.get(region) + ): + kwargs.pop("custom_cli_credentials", None) return run_command(*args, **kwargs) else: return run_command(*args, **kwargs) diff --git a/tests/integration-tests/tests/iam/test_iam.py b/tests/integration-tests/tests/iam/test_iam.py index c8997cb451..6cd560a845 100644 --- a/tests/integration-tests/tests/iam/test_iam.py +++ b/tests/integration-tests/tests/iam/test_iam.py @@ -17,9 +17,11 @@ import pytest import yaml from assertpy import assert_that +from cfn_stacks_factory import CfnStack, CfnStacksFactory +from framework.tests_configuration.config_utils import get_all_regions from remote_command_executor import RemoteCommandExecutor from s3_common_utils import check_s3_read_resource, check_s3_read_write_resource, get_policy_resources -from utils import wait_for_computefleet_changed +from utils import generate_stack_name, wait_for_computefleet_changed from tests.common.assertions import assert_no_errors_in_logs from tests.schedulers.test_awsbatch import _test_job_submission as _test_job_submission_awsbatch @@ -332,3 +334,131 @@ def test_s3_read_write_resource(region, pcluster_config_reader, s3_bucket_factor # Check S3 resources check_s3_read_resource(region, cluster, get_policy_resources(config, enable_write_access=False)) check_s3_read_write_resource(region, cluster, get_policy_resources(config, enable_write_access=True)) + + +@pytest.mark.parametrize("iam_resource_prefix", ["/path-prefix/name-prefix-"]) +@pytest.mark.usefixtures("os", "instance") +def test_iam_resource_prefix( + initialize_resource_prefix_cli_creds, + pcluster_config_reader, + clusters_factory, + test_datadir, + scheduler_commands_factory, + s3_bucket_factory, + s3_bucket, + iam_resource_prefix, +): + cli_credentials = initialize_resource_prefix_cli_creds(test_datadir) + if cli_credentials: + for region, creds in cli_credentials.items(): + + bucket_name = s3_bucket + cfn_client, _, iam_client, _ = _create_boto3_clients(region) + create_config, _ = _get_config_create_and_update(test_datadir) + cluster_config = pcluster_config_reader( + config_file=create_config, min_count=1, bucket=bucket_name, iam_resource_prefix=iam_resource_prefix + ) + + cluster = clusters_factory(cluster_config, custom_cli_credentials=creds) + _test_iam_resource_in_cluster(cfn_client, iam_client, cluster.name, iam_resource_prefix) + + +def _split_resource_prefix(resource_prefix): + """To split Path and name prefix from Resource Prefix.""" + if resource_prefix: + split_index = resource_prefix.rfind("/") + 1 + return ( + None + if split_index == 0 + else resource_prefix + if split_index == len(resource_prefix) + else resource_prefix[:split_index], + None + if split_index == len(resource_prefix) + else resource_prefix + if split_index == 0 + else resource_prefix[split_index:], + ) + return None, None + + +def _check_iam_resource_prefix(resource_arn_list, iam_resource_prefix): + """Check the path and name of IAM resource ( Roles, policy and Instance profiles).""" + iam_path, iam_name_prefix = _split_resource_prefix(iam_resource_prefix) + for resource in resource_arn_list: + if "arn:aws:iam:" in resource: + if iam_path: + assert_that(resource).contains(iam_path) + else: + assert_that(resource).contains("/parallelcluster/") + if iam_name_prefix: + assert_that(resource).contains(iam_name_prefix) + + +def 
_test_iam_resource_in_cluster(cfn_client, iam_client, stack_name, iam_resource_prefix): + """Test IAM resources by checking the path and name prefix in AWS IAM and check cluster is created.""" + + # Check for cluster Status + + assert_that(cfn_client.describe_stacks(StackName=stack_name).get("Stacks")[0].get("StackStatus")).is_equal_to( + "CREATE_COMPLETE" + ) + + resources = cfn_client.describe_stack_resources(StackName=stack_name)["StackResources"] + resource_arn_list = [] + + for resource in resources: + resource_type = resource["ResourceType"] + if resource_type == "AWS::IAM::Role": + + resource_arn_list.append(iam_client.get_role(RoleName=resource["PhysicalResourceId"])["Role"]["Arn"]) + resource_arn_list.extend( + iam_client.list_role_policies(RoleName=resource["PhysicalResourceId"])["PolicyNames"] + ) + if resource_type == "AWS::IAM::InstanceProfile": + resource_arn_list.append( + iam_client.get_instance_profile(InstanceProfileName=resource["PhysicalResourceId"])["InstanceProfile"][ + "Arn" + ] + ) + _check_iam_resource_prefix(resource_arn_list, iam_resource_prefix) + + +@pytest.fixture(scope="class") +def initialize_resource_prefix_cli_creds(request): + """Create an IAM Role with Permission Boundary for testing Resource Prefix Feature.""" + + stack_factory = CfnStacksFactory(request.config.getoption("credential")) + + def _create_resource_prefix_cli_creds(test_datadir): + regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config")) + stack_template_path = os_lib.path.join("..", test_datadir / "user-role-iam-resource-prefix.cfn.yaml") + with open(stack_template_path, encoding="utf-8") as stack_template_file: + stack_template_data = stack_template_file.read() + cli_creds = {} + for region in regions: + if request.config.getoption("iam_user_role_stack_name"): + stack_name = request.config.getoption("iam_user_role_stack_name") + logging.info(f"Using stack {stack_name} in region {region}") + stack = CfnStack( + name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data + ) + else: + logging.info("Creating IAM roles for pcluster CLI") + stack_name = generate_stack_name( + "integ-tests-iam-rp-user-role", request.config.getoption("stackname_suffix") + ) + stack = CfnStack( + name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data + ) + + stack_factory.create_stack(stack) + cli_creds[region] = stack.cfn_outputs["ParallelClusterUserRole"] + return cli_creds + + yield _create_resource_prefix_cli_creds + + if not request.config.getoption("no_delete"): + stack_factory.delete_all_stacks() + else: + logging.warning("Skipping deletion of CFN stacks because --no-delete option is set") diff --git a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml new file mode 100644 index 0000000000..a98e4cca97 --- /dev/null +++ b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml @@ -0,0 +1,41 @@ +Image: + Os: {{ os }} +Iam: + ResourcePrefix: {{ iam_resource_prefix }} +HeadNode: + InstanceType: {{ instance }} + Networking: + SubnetId: {{ public_subnet_id }} + Ssh: + KeyName: {{ key_name }} + Iam: + S3Access: + - BucketName: {{ bucket }} + KeyName: read_and_write/ + EnableWriteAccess: true +Scheduling: + Scheduler: {{ scheduler }} + SlurmQueues: + - Name: queue-0 + ComputeResources: + - Name: compute-resource-0 + InstanceType: {{ 
instance }}
+          MinCount: {{ min_count }}
+      Networking:
+        SubnetIds:
+          - {{ private_subnet_id }}
+    - Name: queue-1
+      ComputeResources:
+        - Name: compute-resource-0
+          InstanceType: {{ instance }}
+          MinCount: {{ min_count }}
+      Networking:
+        SubnetIds:
+          - {{ private_subnet_id }}
+      Iam:
+        S3Access:
+          - BucketName: {{ bucket }}
+            KeyName: read_and_write/
+            EnableWriteAccess: true
+
+
diff --git a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml
new file mode 100644
index 0000000000..b0ab6cb067
--- /dev/null
+++ b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml
@@ -0,0 +1,785 @@
+AWSTemplateFormatVersion: '2010-09-09'
+
+Parameters:
+  Region:
+    Type: String
+    Default: '*'
+
+  EnableIamAdminAccess:
+    Description: WARNING - setting this to true grants IAM admin privileges
+    Type: String
+    Default: true
+    AllowedValues:
+      - true
+      - false
+
+  EnablePermissionsBoundary:
+    Description: Force iam:CreateRole and iam:PutRolePolicy to use PermissionsBoundary
+    Type: String
+    Default: false
+    AllowedValues:
+      - true
+      - false
+  EnableFSxS3Access:
+    Description: |
+      When set to true, the ParallelCluster API can access and write to the S3 buckets specified in the FsxS3Buckets field; this is needed to import/export from/to S3 when creating an FSx filesystem.
+      NOTE - setting this to true grants the Lambda function S3 Get*, List* and PutObject privileges on the buckets specified in FsxS3Buckets.
+    Type: String
+    Default: true
+    AllowedValues:
+      - true
+      - false
+
+  FsxS3Buckets:
+    Description: |
+      Comma separated list of S3 bucket ARNs, to allow the lambda function to import/export from/to S3 when creating an FSx filesystem.
+      NOTE - The setting is used only when EnableFSxS3Access is set to true. (example arn:aws:s3:::,arn:aws:s3:::)
+    Type: String
+    Default: 'arn:*:s3:::integ-tests-*'
+    AllowedPattern: ^((arn:[a-z\-\*]*:s3:[a-z0-9\-]*:([0-9]{12})*:[^,\s\/]+)?(,arn:[a-z\-\*]*:s3:[a-z0-9\-]*:([0-9]{12})*:[^,\s\/]+)*)$|^\*$
+    ConstraintDescription: |
+      The list of S3 buckets is incorrectly formatted. The list should have the format: arn::s3:::[,arn::s3:::,...]
+ Example: arn:aws:s3:::test-bucket-1,arn:aws:s3:::test-bucket-2,arn:aws:s3:::test-bucket-3 + +Conditions: + EnableIamPolicy: !Equals [!Ref EnableIamAdminAccess, true] + EnablePermissionsBoundary: !Equals [!Ref EnablePermissionsBoundary, true] + IsMultiRegion: !Equals [!Ref Region, '*'] + CreateIamResources: !Equals [true, true] # to keep aligned the resources in the API stack + EnableFSxS3AccessCondition: !And + - !Equals [!Ref EnableFSxS3Access, true] + - !Condition CreateIamResources + UseAllBucketsForFSxS3: !Equals [!Ref FsxS3Buckets, "*"] + +Resources: + + ParallelClusterUserRole: + Type: AWS::IAM::Role + Properties: + Path: /parallelcluster/ + AssumeRolePolicyDocument: + Statement: + - Effect: Allow + Action: sts:AssumeRole + Principal: + AWS: + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:root' + ManagedPolicyArns: + - !Ref ParallelClusterClusterPolicy + - !Ref ParallelClusterClusterPolicyBatch + - !Ref ParallelClusterLogRetrievalPolicy + PermissionsBoundary: !Ref PermissionsBoundaryPolicy + + ### IAM POLICIES + + DefaultParallelClusterIamAdminPolicy: + Type: AWS::IAM::ManagedPolicy + Condition: EnableIamPolicy + Properties: + Roles: + - !Ref ParallelClusterUserRole + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - iam:CreateServiceLinkedRole + - iam:DeleteRole + - iam:TagRole + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* + Effect: Allow + Sid: IamRole + - Action: + - iam:CreateRole + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* + Effect: Allow + Condition: !If + - EnablePermissionsBoundary + - StringEquals: + iam:PermissionsBoundary: + - !Ref PermissionsBoundaryPolicy + - !Ref AWS::NoValue + Sid: IamCreateRole + - Action: + - iam:PutRolePolicy + - iam:DeleteRolePolicy + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* + Effect: Allow + Sid: IamInlinePolicy + Condition: !If + - EnablePermissionsBoundary + - StringEquals: + iam:PermissionsBoundary: + - !Ref PermissionsBoundaryPolicy + - !Ref AWS::NoValue + - Action: + - iam:AttachRolePolicy + - iam:DetachRolePolicy + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* + Condition: + ArnLike: + iam:PolicyARN: +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::aws:policy/CloudWatchAgentServerPolicy + - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore + - !Sub arn:${AWS::Partition}:iam::aws:policy/AWSBatchFullAccess + - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonS3ReadOnlyAccess + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSBatchServiceRole + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AmazonEC2SpotFleetTaggingRole + - !Sub 
arn:${AWS::Partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole + StringEquals: !If + - EnablePermissionsBoundary + - iam:PermissionsBoundary: + - !Ref PermissionsBoundaryPolicy + - !Ref AWS::NoValue + Effect: Allow + Sid: IamPolicy + + ### CLUSTER ACTIONS POLICIES + + ParallelClusterClusterPolicyBatch: + Type: AWS::IAM::ManagedPolicy + Condition: CreateIamResources + Properties: + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - iam:GetRole + - iam:PassRole + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* + Effect: Allow + Condition: + StringEqualsIfExists: + iam:PassedToService: + - ecs-tasks.amazonaws.com + - batch.amazonaws.com + - codebuild.amazonaws.com + Sid: IamPassRole + - Action: + - iam:CreateServiceLinkedRole + - iam:DeleteServiceLinkedRole + Resource: + # AWS Batch creates a service linked role automatically for the ComputeEnvironment + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/aws-service-role/batch.amazonaws.com/* + Effect: Allow + Condition: + StringEquals: + iam:AWSServiceName: + - batch.amazonaws.com + - Action: + - codebuild:* + Resource: !Sub arn:${AWS::Partition}:codebuild:${Region}:${AWS::AccountId}:project/pcluster-* + Effect: Allow + - Action: + - ecr:* + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: ECR + - Action: + - batch:* + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: Batch + - Action: + - events:* + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Resource: '*' + Sid: AmazonCloudWatchEvents + - Action: + - ecs:DescribeContainerInstances + - ecs:ListContainerInstances + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: ECS + + FSxS3AccessPolicy: + Type: AWS::IAM::Policy + Condition: EnableFSxS3AccessCondition + Properties: + PolicyName: FSxS3AccessPolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - iam:CreateServiceLinkedRole + - iam:AttachRolePolicy + - iam:PutRolePolicy + Resource: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/aws-service-role/s3.data-source.lustre.fsx.amazonaws.com/* + Effect: Allow + Sid: FSxS3PoliciesAttach + - Action: + - s3:Get* + - s3:List* + - s3:PutObject + Resource: !Split + - "," + - !If + - UseAllBucketsForFSxS3 + - "*" + - !Sub ["${FsxS3Buckets},${FsxS3BucketsObjects}", FsxS3BucketsObjects: !Join ["/*,", !Split [",", !Sub "${FsxS3Buckets}/*"]]] + Effect: Allow + Sid: EnableFSxS3Access + Roles: + - !Ref ParallelClusterUserRole + + ParallelClusterClusterPolicy: + Type: AWS::IAM::ManagedPolicy + Condition: CreateIamResources + Properties: + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - ec2:Describe* + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: EC2Read + - Action: + - ec2:AllocateAddress + - ec2:AssociateAddress + - ec2:AttachNetworkInterface + - ec2:AuthorizeSecurityGroupEgress + - ec2:AuthorizeSecurityGroupIngress + - 
ec2:CreateLaunchTemplate + - ec2:CreateLaunchTemplateVersion + - ec2:CreateNetworkInterface + - ec2:CreatePlacementGroup + - ec2:CreateSecurityGroup + - ec2:CreateSnapshot + - ec2:CreateTags + - ec2:CreateVolume + - ec2:DeleteLaunchTemplate + - ec2:DeleteNetworkInterface + - ec2:DeletePlacementGroup + - ec2:DeleteSecurityGroup + - ec2:DeleteVolume + - ec2:DisassociateAddress + - ec2:ModifyLaunchTemplate + - ec2:ModifyNetworkInterfaceAttribute + - ec2:ModifyVolume + - ec2:ModifyVolumeAttribute + - ec2:ReleaseAddress + - ec2:RevokeSecurityGroupEgress + - ec2:RevokeSecurityGroupIngress + - ec2:RunInstances + - ec2:TerminateInstances + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: EC2Write + - Action: + - dynamodb:DescribeTable + - dynamodb:ListTagsOfResource + - dynamodb:CreateTable + - dynamodb:DeleteTable + - dynamodb:GetItem + - dynamodb:PutItem + - dynamodb:UpdateItem + - dynamodb:Query + - dynamodb:TagResource + Resource: !Sub arn:${AWS::Partition}:dynamodb:${Region}:${AWS::AccountId}:table/parallelcluster-* + Effect: Allow + Sid: DynamoDB + - Action: + - route53:ChangeResourceRecordSets + - route53:ChangeTagsForResource + - route53:CreateHostedZone + - route53:DeleteHostedZone + - route53:GetChange + - route53:GetHostedZone + - route53:ListResourceRecordSets + - route53:ListQueryLoggingConfigs + Resource: '*' + Effect: Allow + Sid: Route53HostedZones + - Action: + - cloudformation:* + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: CloudFormation + - Action: + - cloudwatch:PutDashboard + - cloudwatch:ListDashboards + - cloudwatch:DeleteDashboards + - cloudwatch:GetDashboard + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: CloudWatch + - Action: + - iam:GetRole + - iam:GetRolePolicy + - iam:GetPolicy + - iam:SimulatePrincipalPolicy + - iam:GetInstanceProfile + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-* +# Some of the Inline ParallelCluster Policies dont have a path + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/name-prefix-* + Effect: Allow + Sid: IamRead + - Action: + - iam:CreateInstanceProfile + - iam:DeleteInstanceProfile + - iam:AddRoleToInstanceProfile + - iam:RemoveRoleFromInstanceProfile + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/* + Effect: Allow + Sid: IamInstanceProfile + - Action: + - iam:GetRole + - iam:PassRole + Resource: +# Some of the CleanupRoles have naming convention of /path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* +# Some of the Roles like HN and CN have naming convention of /path-prefix/{clsuter_name}/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* + + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* + + Effect: Allow + Condition: + 
StringEqualsIfExists: + iam:PassedToService: + - lambda.amazonaws.com + - ec2.amazonaws.com + - ec2.amazonaws.com.cn + - spotfleet.amazonaws.com + Sid: IamPassRole + - Action: + - iam:CreateServiceLinkedRole + - iam:DeleteServiceLinkedRole + Resource: '*' + Effect: Allow + Condition: + StringEquals: + iam:AWSServiceName: + - fsx.amazonaws.com + - s3.data-source.lustre.fsx.amazonaws.com + - Action: + - lambda:CreateFunction + - lambda:TagResource + - lambda:DeleteFunction + - lambda:GetFunctionConfiguration + - lambda:GetFunction + - lambda:InvokeFunction + - lambda:AddPermission + - lambda:RemovePermission + - lambda:UpdateFunctionConfiguration + - lambda:ListTags + - lambda:UntagResource + Resource: + - !Sub arn:${AWS::Partition}:lambda:${Region}:${AWS::AccountId}:function:parallelcluster-* + - !Sub arn:${AWS::Partition}:lambda:${Region}:${AWS::AccountId}:function:pcluster-* + Effect: Allow + Sid: Lambda + - Action: + - s3:* + Resource: + - !Sub arn:${AWS::Partition}:s3:::parallelcluster-* + - !Sub arn:${AWS::Partition}:s3:::aws-parallelcluster-* + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: S3ResourcesBucket + - Action: + - s3:Get* + - s3:List* + Resource: !Sub arn:${AWS::Partition}:s3:::${Region}-aws-parallelcluster* + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: S3ParallelClusterReadOnly + - Action: + - fsx:* + Resource: + - !Sub arn:${AWS::Partition}:fsx:${Region}:${AWS::AccountId}:* + Effect: Allow + Sid: FSx + - Action: + - elasticfilesystem:* + Resource: + - !Sub arn:${AWS::Partition}:elasticfilesystem:${Region}:${AWS::AccountId}:* + Effect: Allow + Sid: EFS + - Action: + - logs:DeleteLogGroup + - logs:PutRetentionPolicy + - logs:DescribeLogGroups + - logs:CreateLogGroup + - logs:TagResource + - logs:UntagResource + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: CloudWatchLogs + - Action: + - resource-groups:ListGroupResources + - resource-groups:GetGroupConfiguration + Resource: '*' + Effect: Allow + Sid: ResourceGroupRead + + ### LOG COMMANDS + + ParallelClusterLogRetrievalPolicy: + Type: AWS::IAM::ManagedPolicy + Condition: CreateIamResources + Properties: + Description: Policies needed to retrieve cluster and images logs + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - logs:DescribeLogGroups + - logs:FilterLogEvents + - logs:GetLogEvents + - logs:CreateExportTask + - logs:DescribeLogStreams + - logs:DescribeExportTasks + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + + + ### PERMISSIONS BOUNDARY + + PermissionsBoundaryPolicy: + Type: AWS::IAM::ManagedPolicy + Properties: + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - route53:ListResourceRecordSets + - route53:ChangeResourceRecordSets + - route53:CreateHostedZone + - route53:ChangeTagsForResource + - route53:DeleteHostedZone + - route53:GetChange + - route53:GetHostedZone + - route53:ListQueryLoggingConfigs + Effect: Allow + Resource: '*' + - Action: ec2:TerminateInstances + Condition: + StringEquals: + ec2:ResourceTag/parallelcluster:node-type: ComputeNode + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + Effect: Allow + Resource: + - !Sub 
arn:${AWS::Partition}:s3:::${AWS::Region}-aws-parallelcluster/* + - !Sub arn:${AWS::Partition}:s3:::dcv-license.${AWS::Region}/* + - !Sub arn:${AWS::Partition}:s3:::ec2imagebuilder* + - Action: + - iam:GetRole + - iam:PassRole + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* + Effect: Allow + - Action: + - cloudformation:* + Effect: Allow + Resource: '*' + - Action: + - s3:DeleteObject + - s3:DeleteObjectVersion + - s3:ListBucket + - s3:ListBucketVersions + - s3:GetObject + - s3:PutObject + - s3:GetObjectVersion + Effect: Allow + Resource: + - !Sub arn:${AWS::Partition}:s3:::parallelcluster-*-v1-do-not-delete + - !Sub arn:${AWS::Partition}:s3:::parallelcluster-*-v1-do-not-delete/* + - Action: + - ecr:BatchDeleteImage + - ecr:ListImages + Effect: Allow + Resource: !Sub arn:${AWS::Partition}:ecr:${AWS::Region}:${AWS::AccountId}:repository/*parallelcluster* + - Action: + - lambda:DeleteFunction + - lambda:RemovePermission + - lambda:CreateFunction + - lambda:TagResource + - lambda:GetFunctionConfiguration + - lambda:GetFunction + - lambda:InvokeFunction + - lambda:AddPermission + - lambda:UpdateFunctionConfiguration + - lambda:ListTags + - lambda:UntagResource + Resource: + - !Sub arn:${AWS::Partition}:lambda:${Region}:${AWS::AccountId}:function:parallelcluster-* + - !Sub arn:${AWS::Partition}:lambda:${Region}:${AWS::AccountId}:function:pcluster-* + - !Sub arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:ParallelClusterImage-* + Effect: Allow + - Action: + - SNS:GetTopicAttributes + - SNS:DeleteTopic + - SNS:GetSubscriptionAttributes + - SNS:Unsubscribe + Resource: !Sub 'arn:${AWS::Partition}:sns:${AWS::Region}:${AWS::AccountId}:ParallelClusterImage-*' + Effect: Allow + # From arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore + - Effect: Allow + Action: + - ssm:DescribeAssociation + - ssm:GetDeployablePatchSnapshotForInstance + - ssm:GetDocument + - ssm:DescribeDocument + - ssm:GetManifest + - ssm:GetParameter + - ssm:GetParameters + - ssm:ListAssociations + - ssm:ListInstanceAssociations + - ssm:PutInventory + - ssm:PutComplianceItems + - ssm:PutConfigurePackageResult + - ssm:UpdateAssociationStatus + - ssm:UpdateInstanceAssociationStatus + - ssm:UpdateInstanceInformation + - ec2messages:AcknowledgeMessage + - ec2messages:DeleteMessage + - ec2messages:FailMessage + - ec2messages:GetEndpoint + - ec2messages:GetMessages + - ec2messages:SendReply + - ssmmessages:CreateControlChannel + - ssmmessages:CreateDataChannel + - ssmmessages:OpenControlChannel + - ssmmessages:OpenDataChannel + Resource: "*" + - Effect: Allow + Action: + - logs:CreateLogStream + - logs:CreateLogGroup + - logs:PutLogEvents + - logs:TagResource + - logs:UntagResource + - logs:DeleteLogGroup + - logs:PutRetentionPolicy + - logs:DescribeLogGroups + Resource: '*' + # Resource Prefix specific Actions + - Action: + - iam:CreateRole + - iam:AttachRolePolicy + - iam:DetachRolePolicy + - iam:PutRolePermissionsBoundary + - iam:TagRole + - iam:UntagRole + - iam:ListRoleTags + - iam:ListRolePolicies + - iam:GetRolePolicy + - iam:PutRolePolicy + - iam:ListAttachedRolePolicies + - iam:DeleteRole + - iam:ListInstanceProfiles + - iam:ListInstanceProfilesForRole + - iam:DeleteRolePolicy + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub 
arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* + Effect: Allow + - Action: + - iam:CreateInstanceProfile + - iam:DeleteInstanceProfile + - iam:AddRoleToInstanceProfile + - iam:RemoveRoleFromInstanceProfile + - iam:TagInstanceProfile + - iam:UntagInstanceProfile + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/* + Effect: Allow + - Action: + - iam:CreatePolicy + - iam:CreatePolicyVersion + - iam:DeletePolicyVersion + - iam:GetPolicyVersion + - iam:GetPolicy + - iam:DeletePolicy + - iam:ListInstanceProfiles + - iam:ListInstanceProfilesForRole + - iam:ListEntitiesForPolicy + - iam:ListPolicyVersions + - iam:TagPolicy + - iam:UntagPolicy + Resource: +# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-*' + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/name-prefix-*' + + Effect: Allow + - Action: + - ec2:Describe* + - ec2:AllocateAddress + - ec2:AssociateAddress + - ec2:AttachNetworkInterface + - ec2:AuthorizeSecurityGroupEgress + - ec2:AuthorizeSecurityGroupIngress + - ec2:AttachVolume + - ec2:CreateLaunchTemplate + - ec2:CreateLaunchTemplateVersion + - ec2:CreateNetworkInterface + - ec2:CreatePlacementGroup + - ec2:CreateSecurityGroup + - ec2:CreateSnapshot + - ec2:CreateTags + - ec2:CreateVolume + - ec2:DeleteLaunchTemplate + - ec2:DeleteNetworkInterface + - ec2:DeletePlacementGroup + - ec2:DeleteSecurityGroup + - ec2:DeleteVolume + - ec2:DisassociateAddress + - ec2:ModifyLaunchTemplate + - ec2:ModifyNetworkInterfaceAttribute + - ec2:ModifyVolume + - ec2:ModifyVolumeAttribute + - ec2:ReleaseAddress + - ec2:RevokeSecurityGroupEgress + - ec2:RevokeSecurityGroupIngress + - ec2:RunInstances + - ec2:TerminateInstances + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: EC2Write + - Action: + - dynamodb:DescribeTable + - dynamodb:ListTagsOfResource + - dynamodb:CreateTable + - dynamodb:DeleteTable + - dynamodb:GetItem + - dynamodb:PutItem + - dynamodb:UpdateItem + - dynamodb:Query + - dynamodb:TagResource + Resource: !Sub arn:${AWS::Partition}:dynamodb:${Region}:${AWS::AccountId}:table/parallelcluster-* + Effect: Allow + Sid: DynamoDB + - Action: + - cloudwatch:PutDashboard + - cloudwatch:ListDashboards + - cloudwatch:DeleteDashboards + - cloudwatch:GetDashboard + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: CloudWatch + - Action: + - iam:CreateServiceLinkedRole + - iam:DeleteServiceLinkedRole + - iam:AttachRolePolicy + - iam:PutRolePolicy + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/aws-service-role/* + Effect: Allow + + +Outputs: + ParallelClusterUserRole: + Value: !GetAtt ParallelClusterUserRole.Arn diff --git a/tests/integration-tests/utils.py b/tests/integration-tests/utils.py index f404976d88..a7c06b1637 100644 --- a/tests/integration-tests/utils.py +++ b/tests/integration-tests/utils.py @@ -81,7 +81,15 @@ def retry_if_subprocess_error(exception): return isinstance(exception, 
subprocess.CalledProcessError)
 
 
-def run_command(command, capture_output=True, log_error=True, env=None, timeout=None, raise_on_error=True, shell=False):
+def run_command(
+    command,
+    capture_output=True,
+    log_error=True,
+    env=None,
+    timeout=None,
+    raise_on_error=True,
+    shell=False,
+):
     """Execute shell command."""
     if isinstance(command, str) and not shell:
         command = shlex.split(command)
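
For reviewers, here is a minimal standalone sketch of the credential-switching behaviour this patch adds to run_pcluster_command(): a test-supplied custom_cli_credentials role ARN takes precedence over the role registered for the region, and the keyword is stripped before delegating to run_command(). The cli_credentials mapping, account ID, role ARNs, and the stubbed sts_credential_provider/run_command helpers below are illustrative placeholders, not the real framework code.

```python
"""Sketch of the custom CLI credential switching introduced in this patch (placeholders only)."""
import os
from contextlib import contextmanager

# Stand-in for the role ARNs registered per region via register_cli_credentials_for_region().
cli_credentials = {"eu-north-1": "arn:aws:iam::123456789012:role/default-cli-role"}


@contextmanager
def sts_credential_provider(region, credential_arn):
    # Stand-in: the real helper assumes the role and exports temporary credentials.
    print(f"assuming {credential_arn} in {region}")
    yield


def run_command(*args, **kwargs):
    # Stand-in for utils.run_command(); just echoes what it would execute.
    print("running", args, kwargs)


def run_pcluster_command(*args, **kwargs):
    """Prefer a test-supplied role over the region default, then drop the kwarg."""
    # Simplified region lookup; the real code reads AWS_DEFAULT_REGION directly.
    region = kwargs.get("region") or os.environ.get("AWS_DEFAULT_REGION", "eu-north-1")
    if region in cli_credentials:
        # A per-test role (e.g. the ParallelClusterUserRole created by
        # initialize_resource_prefix_cli_creds) wins over the registered default.
        with sts_credential_provider(
            region, credential_arn=kwargs.get("custom_cli_credentials") or cli_credentials.get(region)
        ):
            kwargs.pop("custom_cli_credentials", None)
            return run_command(*args, **kwargs)
    return run_command(*args, **kwargs)


if __name__ == "__main__":
    run_pcluster_command(
        ["pcluster", "create-cluster"],
        custom_cli_credentials="arn:aws:iam::123456789012:role/path-prefix/name-prefix-user-role",
    )
```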