From 1546d8f158104a5b3638c9765b4597d855bf6ac5 Mon Sep 17 00:00:00 2001 From: Hanwen Date: Tue, 15 Dec 2020 16:32:42 -0500 Subject: [PATCH] Add iam_lambda_role parameter under cluster section in the config file 1. Add `iam_lambda_role` parameter to the config file. If specified, this role will be attached to all Lambda function resources created by CloudFormation Templates. 2. If both `ec2_iam_role` and `iam_lambda_role` are provided, and the scheduler is `sge`, `torque`, or `slurm`, there will be no role created by `pcluster` commands. Note that if `awsbatch` is the scheduler, there will be a role created during `pcluster create`. 3. Integration tests: Extract some functions (role creation, policy creation) from `storage.kms_key_factory` to `conftest`. The code in `kms_key_factory` is kept untouched to limit the scale of this commit. Signed-off-by: Hanwen --- CHANGELOG.md | 4 + cli/src/pcluster/config/mappings.py | 4 + cli/src/pcluster/examples/config | 3 + cli/tests/pcluster/config/defaults.py | 8 +- cloudformation/aws-parallelcluster.cfn.json | 36 ++++- cloudformation/batch-substack.cfn.json | 46 ++++-- .../compute-fleet-hit-substack.cfn.yaml | 17 ++- .../configs/common/common.yaml | 10 +- .../integration-tests/configs/new_region.yaml | 4 +- tests/integration-tests/conftest.py | 111 ++++++++++++++- .../batch_lambda_function_policy.json | 44 ++++++ .../traditional_lambda_function_policy.json | 64 +++++++++ tests/integration-tests/tests/common/utils.py | 5 + tests/integration-tests/tests/iam/test_iam.py | 133 ++++++++++++++++++ .../test_iam_policies/pcluster.config.ini | 0 .../tests/iam/test_iam/test_iam_roles/HIT.ini | 29 ++++ .../tests/iam/test_iam/test_iam_roles/SIT.ini | 28 ++++ .../tests/iam_policies/test_iam_policies.py | 54 ------- 18 files changed, 525 insertions(+), 75 deletions(-) create mode 100644 tests/integration-tests/resources/batch_lambda_function_policy.json create mode 100644 tests/integration-tests/resources/traditional_lambda_function_policy.json
create mode 100644 tests/integration-tests/tests/iam/test_iam.py rename tests/integration-tests/tests/{iam_policies/test_iam_policies => iam/test_iam}/test_iam_policies/pcluster.config.ini (100%) create mode 100644 tests/integration-tests/tests/iam/test_iam/test_iam_roles/HIT.ini create mode 100644 tests/integration-tests/tests/iam/test_iam/test_iam_roles/SIT.ini delete mode 100644 tests/integration-tests/tests/iam_policies/test_iam_policies.py diff --git a/CHANGELOG.md b/CHANGELOG.md index a359206679..35a0769fb4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,10 @@ CHANGELOG failures due to CloudFormation throttling. - Add support for io2 EBS volume type. - Install EFA kernel module also on ARM instances with `alinux2` and `ubuntu1804` +- Add `iam_lambda_role` parameter under `cluster` section to enable the possibility to specify an existing IAM role to + be used by AWS Lambda functions in CloudFormation. + When using `sge`, `torque`, or `slurm` as the scheduler, + `pcluster` will not create any IAM role if both `ec2_iam_role` and `iam_lambda_role` are provided. 
**CHANGES** diff --git a/cli/src/pcluster/config/mappings.py b/cli/src/pcluster/config/mappings.py index cfd694fb69..36e4575a54 100644 --- a/cli/src/pcluster/config/mappings.py +++ b/cli/src/pcluster/config/mappings.py @@ -1007,6 +1007,10 @@ "validators": [s3_bucket_validator], "update_policy": UpdatePolicy.READ_ONLY_RESOURCE_BUCKET, }), + ("iam_lambda_role", { + "cfn_param_mapping": "IAMLambdaRoleName", + "update_policy": UpdatePolicy.SUPPORTED, + }), ] diff --git a/cli/src/pcluster/examples/config b/cli/src/pcluster/examples/config index f3494474ba..914766f190 100644 --- a/cli/src/pcluster/examples/config +++ b/cli/src/pcluster/examples/config @@ -109,6 +109,9 @@ key_name = mykey # Existing EC2 IAM policies to be associated with the EC2 instances # (defaults to NONE) #additional_iam_policies = NONE +# Existing IAM role to be associated with Lambda functions +# (defaults to NONE) +#iam_lambda_role = NONE # Disable Hyperthreading on all instances # (defaults to False) #disable_hyperthreading = false diff --git a/cli/tests/pcluster/config/defaults.py b/cli/tests/pcluster/config/defaults.py index 73d0e4cae4..fec837315a 100644 --- a/cli/tests/pcluster/config/defaults.py +++ b/cli/tests/pcluster/config/defaults.py @@ -145,6 +145,7 @@ "architecture": "x86_64", "network_interfaces_count": ["1", "1"], "cluster_resource_bucket": None, + "iam_lambda_role": None, } DEFAULT_CLUSTER_HIT_DICT = { @@ -194,6 +195,7 @@ "architecture": "x86_64", "network_interfaces_count": ["1", "1"], "cluster_resource_bucket": None, # cluster_resource_bucket no default, but this is here to make testing easier + "iam_lambda_role": None, } DEFAULT_CW_LOG_DICT = {"enable": True, "retention_days": 14} @@ -226,8 +228,8 @@ class DefaultDict(Enum): # ------------------ Default CFN parameters ------------------ # # number of CFN parameters created by the PclusterConfig object. 
-CFN_SIT_CONFIG_NUM_OF_PARAMS = 61 -CFN_HIT_CONFIG_NUM_OF_PARAMS = 52 +CFN_SIT_CONFIG_NUM_OF_PARAMS = 62 +CFN_HIT_CONFIG_NUM_OF_PARAMS = 53 # CFN parameters created by the pcluster CLI CFN_CLI_RESERVED_PARAMS = ["ArtifactS3RootDirectory", "RemoveBucketOnDeletion"] @@ -343,6 +345,7 @@ class DefaultDict(Enum): # architecture "Architecture": "x86_64", "NetworkInterfacesCount": "1,1", + "IAMLambdaRoleName": "NONE", } @@ -412,6 +415,7 @@ class DefaultDict(Enum): # architecture "Architecture": "x86_64", "NetworkInterfacesCount": "1,1", + "IAMLambdaRoleName": "NONE", } diff --git a/cloudformation/aws-parallelcluster.cfn.json b/cloudformation/aws-parallelcluster.cfn.json index ad01d2604f..3ecbb288e4 100644 --- a/cloudformation/aws-parallelcluster.cfn.json +++ b/cloudformation/aws-parallelcluster.cfn.json @@ -285,6 +285,11 @@ "Type": "CommaDelimitedList", "Default": "NONE" }, + "IAMLambdaRoleName": { + "Description": "Existing IAM role name for Lambda functions", + "Type": "String", + "Default": "NONE" + }, "VPCSecurityGroupId": { "Description": "Existing VPC security group Id", "Type": "String", @@ -660,6 +665,14 @@ "NONE" ] }, + "CreateIAMLambdaRole": { + "Fn::Equals": [ + { + "Ref": "IAMLambdaRoleName" + }, + "NONE" + ] + }, "AddHITIamPolicies": { "Fn::And": [ { @@ -2350,6 +2363,9 @@ "MasterServerSubstack", "Outputs.MasterPrivateIP" ] + }, + "IAMLambdaRoleName": { + "Ref": "IAMLambdaRoleName" } }, "TemplateURL": { @@ -2510,7 +2526,8 @@ "PolicyName": "LambdaPolicy" } ] - } + }, + "Condition": "CreateIAMLambdaRole" }, "CleanupResourcesS3BucketCustomResource": { "Type": "AWS::CloudFormation::CustomResource", @@ -2587,9 +2604,17 @@ "Handler": "cleanup_resources.handler", "MemorySize": 128, "Role": { - "Fn::GetAtt": [ - "CleanupResourcesFunctionExecutionRole", - "Arn" + "Fn::If": [ + "CreateIAMLambdaRole", + { + "Fn::GetAtt": [ + "CleanupResourcesFunctionExecutionRole", + "Arn" + ] + }, + { + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${IAMLambdaRoleName}" 
+ } ] }, "Runtime": "python3.8", @@ -3963,6 +3988,9 @@ } ] }, + "IAMLambdaRoleName": { + "Ref": "IAMLambdaRoleName" + }, "ResourcesS3Bucket": { "Ref": "ResourcesS3Bucket" }, diff --git a/cloudformation/batch-substack.cfn.json b/cloudformation/batch-substack.cfn.json index 4be334dd4d..980fceb0fd 100644 --- a/cloudformation/batch-substack.cfn.json +++ b/cloudformation/batch-substack.cfn.json @@ -99,6 +99,10 @@ "MasterPrivateIP": { "Description": "Private IP of the head node", "Type": "String" + }, + "IAMLambdaRoleName": { + "Description": "Existing IAM role name for Lambda functions", + "Type": "String" } }, "Conditions": { @@ -117,6 +121,14 @@ }, "arm64" ] + }, + "CreateIAMLambdaRole": { + "Fn::Equals": [ + { + "Ref": "IAMLambdaRoleName" + }, + "NONE" + ] } }, "Resources": { @@ -821,9 +833,17 @@ "Handler": "manage_docker_images.handler", "MemorySize": 128, "Role": { - "Fn::GetAtt": [ - "ManageDockerImagesFunctionExecutionRole", - "Arn" + "Fn::If": [ + "CreateIAMLambdaRole", + { + "Fn::GetAtt": [ + "ManageDockerImagesFunctionExecutionRole", + "Arn" + ] + }, + { + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${IAMLambdaRoleName}" + } ] }, "Runtime": "python3.6", @@ -897,7 +917,8 @@ "PolicyName": "LambdaPolicy" } ] - } + }, + "Condition": "CreateIAMLambdaRole" }, "DockerBuildWaitHandle": { "Type": "AWS::CloudFormation::WaitConditionHandle", @@ -952,7 +973,8 @@ "PolicyName": "LambdaPolicy" } ] - } + }, + "Condition": "CreateIAMLambdaRole" }, "SendBuildNotificationFunction": { "Type": "AWS::Lambda::Function", @@ -971,9 +993,17 @@ "Handler": "send_build_notification.handler", "MemorySize": 128, "Role": { - "Fn::GetAtt": [ - "SendBuildNotificationFunctionExecutionRole", - "Arn" + "Fn::If": [ + "CreateIAMLambdaRole", + { + "Fn::GetAtt": [ + "SendBuildNotificationFunctionExecutionRole", + "Arn" + ] + }, + { + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${IAMLambdaRoleName}" + } ] }, "Runtime": "python3.6", diff --git 
a/cloudformation/compute-fleet-hit-substack.cfn.yaml b/cloudformation/compute-fleet-hit-substack.cfn.yaml index 69c1df4e0d..3efd304eb1 100644 --- a/cloudformation/compute-fleet-hit-substack.cfn.yaml +++ b/cloudformation/compute-fleet-hit-substack.cfn.yaml @@ -85,6 +85,8 @@ Parameters: Type: AWS::EC2::VPC::Id RootRole: Type: String + IAMLambdaRoleName: + Type: String ResourcesS3Bucket: Type: String ArtifactS3RootDirectory: @@ -101,6 +103,9 @@ Conditions: UseAssociatePublicIpAddress: !Equals - !Ref 'AssociatePublicIpAddress' - true + CreateIAMLambdaRole: !Equals + - !Ref 'IAMLambdaRoleName' + - NONE Resources: {%- for queue, queue_config in queues.items() %} {%- for compute_resource in queue_config.compute_resource_settings.values() %} @@ -528,7 +533,10 @@ Resources: S3Key: !Sub '${ArtifactS3RootDirectory}/custom_resources_code/artifacts.zip' Handler: cleanup_resources.handler MemorySize: 128 - Role: !GetAtt 'CleanupRoute53FunctionExecutionRole.Arn' + Role: !If + - CreateIAMLambdaRole + - !GetAtt 'CleanupRoute53FunctionExecutionRole.Arn' + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${IAMLambdaRoleName}' Runtime: python3.8 Timeout: 900 CleanupRoute53CustomResource: @@ -570,6 +578,7 @@ Resources: - ClusterHostedZone: !Ref 'ClusterHostedZone' Version: '2012-10-17' PolicyName: LambdaPolicy + Condition: CreateIAMLambdaRole {%- endif %} UpdateWaiterFunction: Type: AWS::Lambda::Function @@ -580,7 +589,10 @@ Resources: S3Key: !Sub '${ArtifactS3RootDirectory}/custom_resources_code/artifacts.zip' Handler: wait_for_update.handler MemorySize: 128 - Role: !GetAtt 'UpdateWaiterFunctionExecutionRole.Arn' + Role: !If + - CreateIAMLambdaRole + - !GetAtt 'UpdateWaiterFunctionExecutionRole.Arn' + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${IAMLambdaRoleName}' Runtime: python3.8 Timeout: 900 UpdateWaiterFunctionExecutionRole: @@ -613,6 +625,7 @@ Resources: Resource: !Sub 
'arn:${AWS::Partition}:dynamodb:${AWS::Region}:${AWS::AccountId}:table/${DynamoDBTable}' Version: '2012-10-17' PolicyName: LambdaPolicy + Condition: CreateIAMLambdaRole Metadata: RootRole: !Ref 'RootRole' VPCId: !Ref 'VPCId' diff --git a/tests/integration-tests/configs/common/common.yaml b/tests/integration-tests/configs/common/common.yaml index 57af542f04..34b28b0c5a 100644 --- a/tests/integration-tests/configs/common/common.yaml +++ b/tests/integration-tests/configs/common/common.yaml @@ -219,13 +219,19 @@ efa: instances: ["p4d.24xlarge"] oss: ["alinux", "ubuntu1804", "centos7"] schedulers: ["sge"] -iam_policies: - test_iam_policies.py::test_iam_policies: +iam: + test_iam.py::test_iam_policies: dimensions: - regions: ["eu-north-1"] instances: {{ common.INSTANCES_DEFAULT_X86 }} oss: ["alinux2"] schedulers: ["slurm", "awsbatch"] + test_iam.py::test_iam_roles: + dimensions: + - regions: ["us-east-2"] + schedulers: ["awsbatch", "slurm", "sge"] + oss: ["alinux2"] + instances: {{ common.INSTANCES_DEFAULT_X86 }} intel_hpc: test_intel_hpc.py::test_intel_hpc: dimensions: diff --git a/tests/integration-tests/configs/new_region.yaml b/tests/integration-tests/configs/new_region.yaml index e77e1f3ed0..3f7b79e4df 100644 --- a/tests/integration-tests/configs/new_region.yaml +++ b/tests/integration-tests/configs/new_region.yaml @@ -108,8 +108,8 @@ test-suites: instances: ["c5n.18xlarge"] oss: ["alinux2"] schedulers: ["slurm"] - iam_policies: - test_iam_policies.py::test_iam_policies: + iam: + test_iam.py::test_iam_policies: dimensions: - regions: {{ NEW_REGION }} instances: {{ common.INSTANCES_DEFAULT_X86 }} diff --git a/tests/integration-tests/conftest.py b/tests/integration-tests/conftest.py index 50cdac7782..ae7beee5f0 100644 --- a/tests/integration-tests/conftest.py +++ b/tests/integration-tests/conftest.py @@ -18,11 +18,13 @@ import os import random import re +import time from shutil import copyfile from traceback import format_tb import boto3 import configparser +import 
pkg_resources import pytest from cfn_stacks_factory import CfnStack, CfnStacksFactory from clusters_factory import Cluster, ClustersFactory @@ -52,7 +54,7 @@ unset_credentials, ) -from tests.common.utils import retrieve_pcluster_ami_without_standard_naming +from tests.common.utils import get_sts_endpoint, retrieve_pcluster_ami_without_standard_naming def pytest_addoption(parser): @@ -293,6 +295,11 @@ def _cluster_factory(cluster_config, extra_args=None, raise_on_error=True): ) +@pytest.fixture(scope="class") +def cluster_model(scheduler): + return "HIT" if scheduler == "slurm" else "SIT" + + def _write_cluster_config_to_outdir(request, cluster_config): out_dir = request.config.getoption("output_dir") @@ -618,6 +625,108 @@ def vpc_stacks(cfn_stacks_factory, request): return vpc_stacks +@pytest.fixture(scope="class") +def common_pcluster_policies(region): + """Create four policies to be attached to ec2_iam_role, iam_lambda_role for awsbatch or traditional schedulers.""" + policies = {} + policies["awsbatch_instance_policy"] = _create_iam_policies( + "integ-tests-ParallelClusterInstancePolicy-batch-" + random_alphanumeric(), region, "batch_instance_policy.json" + ) + policies["traditional_instance_policy"] = _create_iam_policies( + "integ-tests-ParallelClusterInstancePolicy-traditional-" + random_alphanumeric(), + region, + "traditional_instance_policy.json", + ) + policies["awsbatch_lambda_policy"] = _create_iam_policies( + "integ-tests-ParallelClusterLambdaPolicy-batch-" + random_alphanumeric(), + region, + "batch_lambda_function_policy.json", + ) + policies["traditional_lambda_policy"] = _create_iam_policies( + "integ-tests-ParallelClusterLambdaPolicy-traditional-" + random_alphanumeric(), + region, + "traditional_lambda_function_policy.json", + ) + + yield policies + + iam_client = boto3.client("iam", region_name=region) + for policy in policies.values(): + iam_client.delete_policy(PolicyArn=policy) + + +@pytest.fixture(scope="class") +def role_factory(region): + 
roles = [] + iam_client = boto3.client("iam", region_name=region) + + def create_role(trusted_service, policies=()): + iam_role_name = f"integ-tests_{trusted_service}_{region}_{random_alphanumeric()}" + logging.info(f"Creating iam role {iam_role_name} for {trusted_service}") + + partition = _get_arn_partition(region) + domain_suffix = ".cn" if partition == "aws-cn" else "" + + trust_relationship_policy_ec2 = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"Service": f"{trusted_service}.amazonaws.com{domain_suffix}"}, + "Action": "sts:AssumeRole", + } + ], + } + iam_client.create_role( + RoleName=iam_role_name, + AssumeRolePolicyDocument=json.dumps(trust_relationship_policy_ec2), + Description="Role for create custom KMS key", + ) + + logging.info(f"Attaching iam policy to the role {iam_role_name}...") + for policy in policies: + iam_client.attach_role_policy(RoleName=iam_role_name, PolicyArn=policy) + + # Having time.sleep here because it takes a while for the IAM role to become valid for use in the + # put_key_policy step for creating KMS key, read the following link for reference : + # https://stackoverflow.com/questions/20156043/how-long-should-i-wait-after-applying-an-aws-iam-policy-before-it-is-valid + time.sleep(60) + logging.info(f"Iam role is ready: {iam_role_name}") + roles.append({"role_name": iam_role_name, "policies": policies}) + return iam_role_name + + yield create_role + + for role in roles: + role_name = role["role_name"] + policies = role["policies"] + for policy in policies: + iam_client.detach_role_policy(RoleName=role_name, PolicyArn=policy) + logging.info(f"Deleting iam role {role_name}") + iam_client.delete_role(RoleName=role_name) + + +def _create_iam_policies(iam_policy_name, region, policy_filename): + logging.info("Creating iam policy {0}...".format(iam_policy_name)) + file_loader = FileSystemLoader(pkg_resources.resource_filename(__name__, "/resources")) + env = 
Environment(loader=file_loader, trim_blocks=True, lstrip_blocks=True) + partition = _get_arn_partition(region) + account_id = ( + boto3.client("sts", region_name=region, endpoint_url=get_sts_endpoint(region)) + .get_caller_identity() + .get("Account") + ) + parallel_cluster_instance_policy = env.get_template(policy_filename).render( + partition=partition, + region=region, + account_id=account_id, + cluster_bucket_name="parallelcluster-*", + ) + return boto3.client("iam", region_name=region).create_policy( + PolicyName=iam_policy_name, PolicyDocument=parallel_cluster_instance_policy + )["Policy"]["Arn"] + + @pytest.fixture(scope="class") def vpc_stack(vpc_stacks, region): return vpc_stacks[region] diff --git a/tests/integration-tests/resources/batch_lambda_function_policy.json b/tests/integration-tests/resources/batch_lambda_function_policy.json new file mode 100644 index 0000000000..ed92d0fbf2 --- /dev/null +++ b/tests/integration-tests/resources/batch_lambda_function_policy.json @@ -0,0 +1,44 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "arn:{{ partition }}:logs:*:*:*", + "Sid": "CloudWatchLogsPolicy" + }, + { + "Action": [ + "ecr:BatchDeleteImage", + "ecr:ListImages" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "ECRPolicy" + }, + { + "Action": [ + "codebuild:BatchGetBuilds", + "codebuild:StartBuild" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "CodeBuildPolicy" + }, + { + "Action": [ + "s3:DeleteBucket", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "S3BucketPolicy" + } + ] +} \ No newline at end of file diff --git a/tests/integration-tests/resources/traditional_lambda_function_policy.json b/tests/integration-tests/resources/traditional_lambda_function_policy.json new file mode 100644 index 0000000000..a3545486ac --- /dev/null +++ 
b/tests/integration-tests/resources/traditional_lambda_function_policy.json @@ -0,0 +1,64 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Resource": "arn:{{ partition }}:logs:*:*:*", + "Effect": "Allow", + "Sid": "CloudWatchLogsPolicy" + }, + { + "Action": [ + "s3:DeleteBucket", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Resource": [ + "arn:{{ partition }}:s3:::*" + ], + "Effect": "Allow", + "Sid": "S3BucketPolicy" + }, + { + "Action": [ + "ec2:DescribeInstances" + ], + "Resource": "*", + "Effect": "Allow", + "Sid": "DescribeInstances" + }, + { + "Action": [ + "ec2:TerminateInstances" + ], + "Resource": "*", + "Effect": "Allow", + "Sid": "FleetTerminatePolicy" + }, + { + "Action": [ + "dynamodb:GetItem", + "dynamodb:PutItem" + ], + "Resource": "arn:{{ partition }}:dynamodb:{{ region }}:{{ account_id }}:table/parallelcluster-*", + "Effect": "Allow", + "Sid": "DynamoDBTable" + }, + { + "Action": [ + "route53:ListResourceRecordSets", + "route53:ChangeResourceRecordSets" + ], + "Resource": [ + "arn:{{ partition }}:route53:::hostedzone/*" + ], + "Effect": "Allow", + "Sid": "Route53DeletePolicy" + } + ] +} \ No newline at end of file diff --git a/tests/integration-tests/tests/common/utils.py b/tests/integration-tests/tests/common/utils.py index d9ee887512..53bb8172c6 100644 --- a/tests/integration-tests/tests/common/utils.py +++ b/tests/integration-tests/tests/common/utils.py @@ -144,3 +144,8 @@ def _assert_ami_is_available(region, ami_id): def get_installed_parallelcluster_version(): """Get the version of the installed aws-parallelcluster package.""" return pkg_resources.get_distribution("aws-parallelcluster").version + + +def get_sts_endpoint(region): + """Get regionalized STS endpoint.""" + return "https://sts.{0}.{1}".format(region, "amazonaws.com.cn" if region.startswith("cn-") else "amazonaws.com") diff --git 
a/tests/integration-tests/tests/iam/test_iam.py b/tests/integration-tests/tests/iam/test_iam.py new file mode 100644 index 0000000000..632071f269 --- /dev/null +++ b/tests/integration-tests/tests/iam/test_iam.py @@ -0,0 +1,133 @@ +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. +# See the License for the specific language governing permissions and limitations under the License. +import logging +import os +from shutil import copyfile + +import boto3 +import pytest +from assertpy import assert_that +from remote_command_executor import RemoteCommandExecutor + +from tests.common.assertions import assert_no_errors_in_logs + + +@pytest.mark.usefixtures("os", "instance") +def test_iam_roles( + region, + scheduler, + common_pcluster_policies, + role_factory, + pcluster_config_reader, + clusters_factory, + cluster_model, + test_datadir, +): + is_awsbatch = scheduler == "awsbatch" + if is_awsbatch: + instance_policies = common_pcluster_policies["awsbatch_instance_policy"] + lambda_policies = common_pcluster_policies["awsbatch_lambda_policy"] + else: + instance_policies = common_pcluster_policies["traditional_instance_policy"] + lambda_policies = common_pcluster_policies["traditional_lambda_policy"] + cluster_role_name = role_factory("ec2", [instance_policies]) + lambda_role_name = role_factory("lambda", [lambda_policies]) + + # Copy the config file template for reuse in update. 
+ config_file_name = cluster_model + ".ini" + config_file_path = os.path.join(str(test_datadir), config_file_name) + updated_config_file_name = cluster_model + ".update.ini" + updated_config_file_path = os.path.join(str(test_datadir), updated_config_file_name) + copyfile(config_file_path, updated_config_file_path) + + cluster_config = pcluster_config_reader( + config_file=config_file_name, ec2_iam_role=cluster_role_name, iam_lambda_role=lambda_role_name + ) + cluster = clusters_factory(cluster_config) + + main_stack_name = "parallelcluster-" + cluster.name + cfn_client = boto3.client("cloudformation", region_name=region) + lambda_client = boto3.client("lambda", region_name=region) + + # Check all CloudFormation stacks after creation + # If scheduler is awsbatch, there will still be IAM roles created. + _check_lambda_role(cfn_client, lambda_client, main_stack_name, lambda_role_name, not is_awsbatch) + + # Test updating the iam_lambda_role + updated_lambda_role_name = role_factory("lambda", [lambda_policies]) + assert_that(updated_lambda_role_name == lambda_role_name).is_false() + cluster.config_file = str( + pcluster_config_reader( + config_file=updated_config_file_name, + ec2_iam_role=cluster_role_name, + iam_lambda_role=updated_lambda_role_name, + ) + ) + cluster.update() + + # Check all CloudFormation stacks after update + _check_lambda_role(cfn_client, lambda_client, main_stack_name, updated_lambda_role_name, not is_awsbatch) + + +def _check_lambda_role(cfn_client, lambda_client, stack_name, lambda_role_name, check_no_role_is_created): + """Test lambda role is attached to all Lambda functions in the stack and its substack.""" + resources = cfn_client.describe_stack_resources(StackName=stack_name)["StackResources"] + for resource in resources: + resource_type = resource["ResourceType"] + if check_no_role_is_created: + # If check_no_role_is_created, check that there is no role created in the stack and its substack. 
+ assert_that(resource_type).is_not_equal_to("AWS::IAM::Role") + if resource_type == "AWS::CloudFormation::Stack": + # Recursively check substacks + _check_lambda_role( + cfn_client, lambda_client, resource["PhysicalResourceId"], lambda_role_name, check_no_role_is_created + ) + if resource_type == "AWS::Lambda::Function": + # Check the role is attached to the Lambda function + lambda_function = lambda_client.get_function(FunctionName=resource["PhysicalResourceId"])["Configuration"] + assert_that(lambda_role_name in lambda_function["Role"]).is_true() + + +@pytest.mark.regions(["ap-northeast-2"]) +@pytest.mark.schedulers(["slurm", "awsbatch"]) +@pytest.mark.oss(["alinux2"]) +@pytest.mark.usefixtures("os", "instance") +def test_iam_policies(region, scheduler, pcluster_config_reader, clusters_factory): + """Test IAM Policies""" + cluster_config = pcluster_config_reader( + iam_policies="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess, arn:aws:iam::aws:policy/AWSBatchFullAccess" + ) + cluster = clusters_factory(cluster_config) + remote_command_executor = RemoteCommandExecutor(cluster) + + _test_s3_access(remote_command_executor, region) + + if scheduler == "awsbatch": + _test_batch_access(remote_command_executor, region) + + assert_no_errors_in_logs(remote_command_executor, scheduler) + + +def _test_s3_access(remote_command_executor, region): + logging.info("Testing S3 Access") + result = remote_command_executor.run_remote_command(f"AWS_DEFAULT_REGION={region} aws s3 ls").stdout + # An error occurred (AccessDenied) when calling the ListBuckets operation: Access Denied + assert_that(result).does_not_contain("AccessDenied") + + +def _test_batch_access(remote_command_executor, region): + logging.info("Testing AWS Batch Access") + result = remote_command_executor.run_remote_command( + f"AWS_DEFAULT_REGION={region} aws batch describe-compute-environments" + ).stdout + # An error occurred (AccessDeniedException) when calling the DescribeComputeEnvironments operation: ... 
+ assert_that(result).does_not_contain("AccessDeniedException") diff --git a/tests/integration-tests/tests/iam_policies/test_iam_policies/test_iam_policies/pcluster.config.ini b/tests/integration-tests/tests/iam/test_iam/test_iam_policies/pcluster.config.ini similarity index 100% rename from tests/integration-tests/tests/iam_policies/test_iam_policies/test_iam_policies/pcluster.config.ini rename to tests/integration-tests/tests/iam/test_iam/test_iam_policies/pcluster.config.ini diff --git a/tests/integration-tests/tests/iam/test_iam/test_iam_roles/HIT.ini b/tests/integration-tests/tests/iam/test_iam/test_iam_roles/HIT.ini new file mode 100644 index 0000000000..da7e4e06c6 --- /dev/null +++ b/tests/integration-tests/tests/iam/test_iam/test_iam_roles/HIT.ini @@ -0,0 +1,29 @@ +[global] +cluster_template = default + +[aws] +aws_region_name = {{ region }} + +[cluster default] +key_name = {{ key_name }} +vpc_settings = parallelcluster-vpc +scheduler = {{ scheduler }} +master_instance_type = {{ instance }} +base_os = {{ os }} +queue_settings = compute +ec2_iam_role = {{ ec2_iam_role }} +iam_lambda_role = {{ iam_lambda_role }} + +[vpc parallelcluster-vpc] +vpc_id = {{ vpc_id }} +master_subnet_id = {{ public_subnet_id }} +compute_subnet_id = {{ private_subnet_id }} +use_public_ips = false + +[queue compute] +enable_efa = false +enable_efa_gdr = false +compute_resource_settings = default + +[compute_resource default] +instance_type = {{ instance }} diff --git a/tests/integration-tests/tests/iam/test_iam/test_iam_roles/SIT.ini b/tests/integration-tests/tests/iam/test_iam/test_iam_roles/SIT.ini new file mode 100644 index 0000000000..79707cc3a7 --- /dev/null +++ b/tests/integration-tests/tests/iam/test_iam/test_iam_roles/SIT.ini @@ -0,0 +1,28 @@ +[global] +cluster_template = default + +[aws] +aws_region_name = {{ region }} + +[cluster default] +key_name = {{ key_name }} +vpc_settings = parallelcluster-vpc +scheduler = {{ scheduler }} +master_instance_type = {{ instance }} 
+compute_instance_type = {{ instance }} +base_os = {{ os }} +ec2_iam_role = {{ ec2_iam_role }} +iam_lambda_role = {{ iam_lambda_role }} +{% if scheduler == "awsbatch" %} +min_vcpus = 1 +desired_vcpus = 1 +{% else %} +initial_queue_size = 1 +maintain_initial_size = true +{% endif %} + +[vpc parallelcluster-vpc] +vpc_id = {{ vpc_id }} +master_subnet_id = {{ public_subnet_id }} +compute_subnet_id = {{ private_subnet_id }} +use_public_ips = false diff --git a/tests/integration-tests/tests/iam_policies/test_iam_policies.py b/tests/integration-tests/tests/iam_policies/test_iam_policies.py deleted file mode 100644 index f14a49dd19..0000000000 --- a/tests/integration-tests/tests/iam_policies/test_iam_policies.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). -# You may not use this file except in compliance with the License. -# A copy of the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "LICENSE.txt" file accompanying this file. -# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. -# See the License for the specific language governing permissions and limitations under the License. 
-import logging - -import pytest -from assertpy import assert_that -from remote_command_executor import RemoteCommandExecutor - -from tests.common.assertions import assert_no_errors_in_logs - - -@pytest.mark.regions(["ap-northeast-2"]) -@pytest.mark.schedulers(["slurm", "awsbatch"]) -@pytest.mark.oss(["alinux2"]) -@pytest.mark.usefixtures("os", "instance") -def test_iam_policies(region, scheduler, pcluster_config_reader, clusters_factory): - """Test IAM Policies""" - cluster_config = pcluster_config_reader( - iam_policies="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess, arn:aws:iam::aws:policy/AWSBatchFullAccess" - ) - cluster = clusters_factory(cluster_config) - remote_command_executor = RemoteCommandExecutor(cluster) - - _test_s3_access(remote_command_executor, region) - - if scheduler == "awsbatch": - _test_batch_access(remote_command_executor, region) - - assert_no_errors_in_logs(remote_command_executor, scheduler) - - -def _test_s3_access(remote_command_executor, region): - logging.info("Testing S3 Access") - result = remote_command_executor.run_remote_command("AWS_DEFAULT_REGION={0} aws s3 ls".format(region)).stdout - # An error occurred (AccessDenied) when calling the ListBuckets operation: Access Denied - assert_that(result).does_not_contain("AccessDenied") - - -def _test_batch_access(remote_command_executor, region): - logging.info("Testing AWS Batch Access") - result = remote_command_executor.run_remote_command( - "AWS_DEFAULT_REGION={0} aws batch describe-compute-environments".format(region) - ).stdout - # An error occurred (AccessDeniedException) when calling the DescribeComputeEnvironments operation: ... - assert_that(result).does_not_contain("AccessDeniedException")