From ae37db5b16c7da72aa71d52823c0ea299925302c Mon Sep 17 00:00:00 2001
From: Himani Deshpande
Date: Mon, 5 Dec 2022 11:18:19 -0500
Subject: [PATCH 1/8] Add integration tests for Resource Prefix

Add integration test cases and a config file for the Iam Resource Prefix to test_iam.py.
Add user-role-rp.cfn.yaml to provide a user role for creating test clusters.
Add the test case to the pcluster3.yaml config for Jenkins tests.
---
 tests/deleteS3.py | 37 +
 tests/iam_policies/user-role-rp.cfn.yaml | 796 ++++++++++++++++++
 .../configs/common/common.yaml | 6 +
 .../integration-tests/configs/pcluster3.yaml | 6 +
 tests/integration-tests/tests/iam/test_iam.py | 137 ++-
 .../pcluster.config.yaml | 49 ++
 6 files changed, 1030 insertions(+), 1 deletion(-)
 create mode 100644 tests/deleteS3.py
 create mode 100644 tests/iam_policies/user-role-rp.cfn.yaml
 create mode 100644 tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml

diff --git a/tests/deleteS3.py b/tests/deleteS3.py
new file mode 100644
index 0000000000..298f967b8a
--- /dev/null
+++ b/tests/deleteS3.py
@@ -0,0 +1,37 @@
+import boto3
+
+client = boto3.client("s3", region_name="eu-north-1")
+response = client.list_buckets()
+# print(response['Buckets'])
+
+
+def delete_s3_bucket(bucket_name, region):
+    """
+    Delete an S3 bucket together with all stored objects.
+
+    :param bucket_name: name of the S3 bucket to delete
+    :param region: region of the bucket
+    """
+    try:
+        bucket = boto3.resource("s3", region_name=region).Bucket(bucket_name)
+        bucket.objects.all().delete()
+        bucket.object_versions.all().delete()
+        bucket.delete()
+    except boto3.client("s3").exceptions.NoSuchBucket:
+        pass
+
+
+for bucket in response["Buckets"]:
+    if "integ-tests-" in bucket["Name"]:
+
+        print(bucket["Name"])
+        delete_s3_bucket(bucket["Name"], "eu-north-1")
+
+# s3 = boto3.resource('s3')
+# s3_bucket = s3.Bucket(bucket['Name'])
+# bucket_versioning = s3.BucketVersioning(bucket['Name'])
+# if bucket_versioning.status == 'Enabled':
+#     s3_bucket.object_versions.delete()
+# else:
+#     s3_bucket.objects.all().delete()
+# response = client.delete_bucket(Bucket=bucket['Name'])
diff --git a/tests/iam_policies/user-role-rp.cfn.yaml b/tests/iam_policies/user-role-rp.cfn.yaml
new file mode 100644
index 0000000000..7547afe72c
--- /dev/null
+++ b/tests/iam_policies/user-role-rp.cfn.yaml
@@ -0,0 +1,796 @@
+AWSTemplateFormatVersion: '2010-09-09'
+
+Parameters:
+  Region:
+    Type: String
+    Default: '*'
+
+  EnableIamAdminAccess:
+    Description: WARNING - setting this to true grants IAM admin privileges
+    Type: String
+    Default: true
+    AllowedValues:
+      - true
+      - false
+
+  EnablePermissionsBoundary:
+    Description: Force iam:CreateRole and iam:PutRolePolicy to use PermissionsBoundary
+    Type: String
+    Default: false
+    AllowedValues:
+      - true
+      - false
+
+  EnableFSxS3Access:
+    Description: |
+      When set to true, the ParallelCluster API can access and write to the S3 buckets specified in the FsxS3Buckets field; this is needed to import/export from/to S3 when creating an FSx filesystem.
+      NOTE - setting this to true grants the Lambda function S3 Get*, List* and PutObject privileges on the buckets specified in FsxS3Buckets.
+    Type: String
+    Default: true
+    AllowedValues:
+      - true
+      - false
+
+  FsxS3Buckets:
+    Description: |
+      Comma-separated list of S3 bucket ARNs, to allow the Lambda function to import/export from/to S3 when creating an FSx filesystem.
+      NOTE - The setting is used only when EnableFSxS3Access is set to true.
(example arn:aws:s3:::,arn:aws:s3:::) + Type: String + Default: 'arn:*:s3:::integ-tests-*' + AllowedPattern: ^((arn:[a-z\-\*]*:s3:[a-z0-9\-]*:([0-9]{12})*:[^,\s\/]+)?(,arn:[a-z\-\*]*:s3:[a-z0-9\-]*:([0-9]{12})*:[^,\s\/]+)*)$|^\*$ + ConstraintDescription: | + The list of S3 buckets is incorrectly formatted. The list should have the format: arn::s3:::[,arn::s3:::,...] + Example: arn:aws:s3:::test-bucket-1,arn:aws:s3:::test-bucket-2,arn:aws:s3:::test-bucket-3 + +Conditions: + EnableIamPolicy: !Equals [!Ref EnableIamAdminAccess, true] + EnablePermissionsBoundary: !Equals [!Ref EnablePermissionsBoundary, true] + IsMultiRegion: !Equals [!Ref Region, '*'] + CreateIamResources: !Equals [true, true] # to keep aligned the resources in the API stack + EnableFSxS3AccessCondition: !And + - !Equals [!Ref EnableFSxS3Access, true] + - !Condition CreateIamResources + UseAllBucketsForFSxS3: !Equals [!Ref FsxS3Buckets, "*"] + +Resources: + + ParallelClusterUserRole: + Type: AWS::IAM::Role + Properties: + Path: /parallelcluster/ + AssumeRolePolicyDocument: + Statement: + - Effect: Allow + Action: sts:AssumeRole + Principal: + AWS: + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:root' + ManagedPolicyArns: + - !Ref ParallelClusterClusterPolicy + - !Ref ParallelClusterClusterPolicyBatch + - !Ref ParallelClusterLogRetrievalPolicy + PermissionsBoundary: !Ref PermissionsBoundaryPolicy + + ### IAM POLICIES + + DefaultParallelClusterIamAdminPolicy: + Type: AWS::IAM::ManagedPolicy + Condition: EnableIamPolicy + Properties: + Roles: + - !Ref ParallelClusterUserRole + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - iam:CreateServiceLinkedRole + - iam:DeleteRole + - iam:TagRole + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + Effect: Allow + Sid: IamRole + - Action: + - iam:CreateRole + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + Effect: Allow + Condition: !If + - EnablePermissionsBoundary + - StringEquals: + iam:PermissionsBoundary: + - !Ref PermissionsBoundaryPolicy + - !Ref AWS::NoValue + Sid: IamCreateRole + - Action: + - iam:PutRolePolicy + - iam:DeleteRolePolicy + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + Effect: Allow + Sid: IamInlinePolicy + Condition: !If + - EnablePermissionsBoundary + - StringEquals: + iam:PermissionsBoundary: + - !Ref PermissionsBoundaryPolicy + - !Ref AWS::NoValue + - Action: + - iam:AttachRolePolicy + - iam:DetachRolePolicy + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + Condition: + ArnLike: + iam:PolicyARN: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-* + - !Sub 
arn:${AWS::Partition}:iam::aws:policy/CloudWatchAgentServerPolicy + - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore + - !Sub arn:${AWS::Partition}:iam::aws:policy/AWSBatchFullAccess + - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonS3ReadOnlyAccess + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSBatchServiceRole + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AmazonEC2SpotFleetTaggingRole + - !Sub arn:${AWS::Partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole + StringEquals: !If + - EnablePermissionsBoundary + - iam:PermissionsBoundary: + - !Ref PermissionsBoundaryPolicy + - !Ref AWS::NoValue + Effect: Allow + Sid: IamPolicy + + ### CLUSTER ACTIONS POLICIES + + ParallelClusterClusterPolicyBatch: + Type: AWS::IAM::ManagedPolicy + Condition: CreateIamResources + Properties: + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - iam:GetRole + - iam:PassRole + Resource: + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*' + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*' + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*' + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* + Effect: Allow + Condition: + StringEqualsIfExists: + iam:PassedToService: + - ecs-tasks.amazonaws.com + - batch.amazonaws.com + - codebuild.amazonaws.com + Sid: IamPassRole + - Action: + - iam:CreateServiceLinkedRole + - iam:DeleteServiceLinkedRole + Resource: + # AWS Batch creates a service linked role automatically for the ComputeEnvironment + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/aws-service-role/batch.amazonaws.com/* + Effect: Allow + Condition: + StringEquals: + iam:AWSServiceName: + - batch.amazonaws.com + - Action: + - codebuild:* + Resource: !Sub arn:${AWS::Partition}:codebuild:${Region}:${AWS::AccountId}:project/pcluster-* + Effect: Allow + - Action: + - ecr:* + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: ECR + - Action: + - batch:* + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: Batch + - Action: + - events:* + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Resource: '*' + Sid: AmazonCloudWatchEvents + - Action: + - ecs:DescribeContainerInstances + - ecs:ListContainerInstances + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: ECS + + FSxS3AccessPolicy: + Type: AWS::IAM::Policy + Condition: EnableFSxS3AccessCondition + Properties: + PolicyName: FSxS3AccessPolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - iam:CreateServiceLinkedRole + - iam:AttachRolePolicy + - iam:PutRolePolicy + Resource: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/aws-service-role/s3.data-source.lustre.fsx.amazonaws.com/* + Effect: Allow + Sid: FSxS3PoliciesAttach + - Action: + - s3:Get* + - s3:List* + - s3:PutObject + Resource: !Split + - "," + - !If 
+ - UseAllBucketsForFSxS3 + - "*" + - !Sub ["${FsxS3Buckets},${FsxS3BucketsObjects}", FsxS3BucketsObjects: !Join ["/*,", !Split [",", !Sub "${FsxS3Buckets}/*"]]] + Effect: Allow + Sid: EnableFSxS3Access + Roles: + - !Ref ParallelClusterUserRole + + ParallelClusterClusterPolicy: + Type: AWS::IAM::ManagedPolicy + Condition: CreateIamResources + Properties: + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - ec2:Describe* + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: EC2Read + - Action: + - ec2:AllocateAddress + - ec2:AssociateAddress + - ec2:AttachNetworkInterface + - ec2:AuthorizeSecurityGroupEgress + - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateLaunchTemplate + - ec2:CreateLaunchTemplateVersion + - ec2:CreateNetworkInterface + - ec2:CreatePlacementGroup + - ec2:CreateSecurityGroup + - ec2:CreateSnapshot + - ec2:CreateTags + - ec2:CreateVolume + - ec2:DeleteLaunchTemplate + - ec2:DeleteNetworkInterface + - ec2:DeletePlacementGroup + - ec2:DeleteSecurityGroup + - ec2:DeleteVolume + - ec2:DisassociateAddress + - ec2:ModifyLaunchTemplate + - ec2:ModifyNetworkInterfaceAttribute + - ec2:ModifyVolume + - ec2:ModifyVolumeAttribute + - ec2:ReleaseAddress + - ec2:RevokeSecurityGroupEgress + - ec2:RevokeSecurityGroupIngress + - ec2:RunInstances + - ec2:TerminateInstances + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: EC2Write + - Action: + - dynamodb:DescribeTable + - dynamodb:ListTagsOfResource + - dynamodb:CreateTable + - dynamodb:DeleteTable + - dynamodb:GetItem + - dynamodb:PutItem + - dynamodb:UpdateItem + - dynamodb:Query + - dynamodb:TagResource + Resource: !Sub arn:${AWS::Partition}:dynamodb:${Region}:${AWS::AccountId}:table/parallelcluster-* + Effect: Allow + Sid: DynamoDB + - Action: + - route53:ChangeResourceRecordSets + - route53:ChangeTagsForResource + - route53:CreateHostedZone + - route53:DeleteHostedZone + - route53:GetChange + - route53:GetHostedZone + - route53:ListResourceRecordSets + - route53:ListQueryLoggingConfigs + Resource: '*' + Effect: Allow + Sid: Route53HostedZones + - Action: + - cloudformation:* + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: CloudFormation + - Action: + - cloudwatch:PutDashboard + - cloudwatch:ListDashboards + - cloudwatch:DeleteDashboards + - cloudwatch:GetDashboard + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: CloudWatch + - Action: + - iam:GetRole + - iam:GetRolePolicy + - iam:GetPolicy + - iam:SimulatePrincipalPolicy + - iam:GetInstanceProfile + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/* + - !Sub 
arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/name-prefix-* + Effect: Allow + Sid: IamRead + - Action: + - iam:CreateInstanceProfile + - iam:DeleteInstanceProfile + - iam:AddRoleToInstanceProfile + - iam:RemoveRoleFromInstanceProfile + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/* + Effect: Allow + Sid: IamInstanceProfile + - Action: + - iam:GetRole + - iam:PassRole + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + Effect: Allow + Condition: + StringEqualsIfExists: + iam:PassedToService: + - lambda.amazonaws.com + - ec2.amazonaws.com + - ec2.amazonaws.com.cn + - spotfleet.amazonaws.com + Sid: IamPassRole + - Action: + - iam:CreateServiceLinkedRole + - iam:DeleteServiceLinkedRole + Resource: '*' + Effect: Allow + Condition: + StringEquals: + iam:AWSServiceName: + - fsx.amazonaws.com + - s3.data-source.lustre.fsx.amazonaws.com + - Action: + - lambda:CreateFunction + - lambda:TagResource + - lambda:DeleteFunction + - lambda:GetFunctionConfiguration + - lambda:GetFunction + - lambda:InvokeFunction + - lambda:AddPermission + - lambda:RemovePermission + - lambda:UpdateFunctionConfiguration + - lambda:ListTags + - lambda:UntagResource + Resource: + - !Sub arn:${AWS::Partition}:lambda:${Region}:${AWS::AccountId}:function:parallelcluster-* + - !Sub arn:${AWS::Partition}:lambda:${Region}:${AWS::AccountId}:function:pcluster-* + Effect: Allow + Sid: Lambda + - Action: + - s3:* + Resource: + - !Sub arn:${AWS::Partition}:s3:::parallelcluster-* + - !Sub arn:${AWS::Partition}:s3:::aws-parallelcluster-* + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: S3ResourcesBucket + - Action: + - s3:Get* + - s3:List* + Resource: !Sub arn:${AWS::Partition}:s3:::${Region}-aws-parallelcluster* + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: S3ParallelClusterReadOnly + - Action: + - fsx:* + Resource: + - !Sub arn:${AWS::Partition}:fsx:${Region}:${AWS::AccountId}:* + Effect: Allow + Sid: FSx + - Action: + - elasticfilesystem:* + Resource: + - !Sub arn:${AWS::Partition}:elasticfilesystem:${Region}:${AWS::AccountId}:* + Effect: Allow + Sid: EFS + - Action: + - logs:DeleteLogGroup + - logs:PutRetentionPolicy + - logs:DescribeLogGroups + - logs:CreateLogGroup + - logs:TagResource + - logs:UntagResource + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: CloudWatchLogs + - Action: + - resource-groups:ListGroupResources + - resource-groups:GetGroupConfiguration + Resource: '*' + Effect: Allow + Sid: ResourceGroupRead + + ### LOG COMMANDS + + ParallelClusterLogRetrievalPolicy: + Type: AWS::IAM::ManagedPolicy + Condition: CreateIamResources + Properties: + Description: Policies needed to retrieve cluster and 
images logs + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - logs:DescribeLogGroups + - logs:FilterLogEvents + - logs:GetLogEvents + - logs:CreateExportTask + - logs:DescribeLogStreams + - logs:DescribeExportTasks + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + + + ### PERMISSIONS BOUNDARY + + PermissionsBoundaryPolicy: + Type: AWS::IAM::ManagedPolicy + Properties: + PolicyDocument: + Version: '2012-10-17' + Statement: + - Action: + - route53:ListResourceRecordSets + - route53:ChangeResourceRecordSets + - route53:CreateHostedZone + - route53:ChangeTagsForResource + - route53:DeleteHostedZone + - route53:GetChange + - route53:GetHostedZone + - route53:ListQueryLoggingConfigs + Effect: Allow + Resource: '*' + - Action: ec2:TerminateInstances + Condition: + StringEquals: + ec2:ResourceTag/parallelcluster:node-type: ComputeNode + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + Effect: Allow + Resource: + - !Sub arn:${AWS::Partition}:s3:::${AWS::Region}-aws-parallelcluster/* + - !Sub arn:${AWS::Partition}:s3:::dcv-license.${AWS::Region}/* + - !Sub arn:${AWS::Partition}:s3:::ec2imagebuilder* + - Action: + - iam:GetRole + - iam:PassRole + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* + Effect: Allow + - Action: + - cloudformation:* + Effect: Allow + Resource: '*' + - Action: + - s3:DeleteObject + - s3:DeleteObjectVersion + - s3:ListBucket + - s3:ListBucketVersions + - s3:GetObject + - s3:PutObject + - s3:GetObjectVersion + Effect: Allow + Resource: + - !Sub arn:${AWS::Partition}:s3:::parallelcluster-*-v1-do-not-delete + - !Sub arn:${AWS::Partition}:s3:::parallelcluster-*-v1-do-not-delete/* + - Action: + - ecr:BatchDeleteImage + - ecr:ListImages + Effect: Allow + Resource: !Sub arn:${AWS::Partition}:ecr:${AWS::Region}:${AWS::AccountId}:repository/*parallelcluster* + - Action: + - lambda:DeleteFunction + - lambda:RemovePermission + - lambda:CreateFunction + - lambda:TagResource + - lambda:GetFunctionConfiguration + - lambda:GetFunction + - lambda:InvokeFunction + - lambda:AddPermission + - lambda:UpdateFunctionConfiguration + - lambda:ListTags + - lambda:UntagResource + Resource: + - !Sub arn:${AWS::Partition}:lambda:${Region}:${AWS::AccountId}:function:parallelcluster-* + - !Sub arn:${AWS::Partition}:lambda:${Region}:${AWS::AccountId}:function:pcluster-* + - !Sub arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:ParallelClusterImage-* + Effect: Allow + - Action: + - SNS:GetTopicAttributes + - SNS:DeleteTopic + - SNS:GetSubscriptionAttributes + - SNS:Unsubscribe + Resource: !Sub 'arn:${AWS::Partition}:sns:${AWS::Region}:${AWS::AccountId}:ParallelClusterImage-*' + Effect: Allow + # From arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore + - Effect: Allow + Action: + - ssm:DescribeAssociation + - ssm:GetDeployablePatchSnapshotForInstance + - ssm:GetDocument + - ssm:DescribeDocument + - ssm:GetManifest + - ssm:GetParameter + - ssm:GetParameters + - ssm:ListAssociations + - ssm:ListInstanceAssociations + - ssm:PutInventory + - ssm:PutComplianceItems + - ssm:PutConfigurePackageResult + - ssm:UpdateAssociationStatus + - ssm:UpdateInstanceAssociationStatus + - ssm:UpdateInstanceInformation + - 
ec2messages:AcknowledgeMessage + - ec2messages:DeleteMessage + - ec2messages:FailMessage + - ec2messages:GetEndpoint + - ec2messages:GetMessages + - ec2messages:SendReply + - ssmmessages:CreateControlChannel + - ssmmessages:CreateDataChannel + - ssmmessages:OpenControlChannel + - ssmmessages:OpenDataChannel + Resource: "*" + - Effect: Allow + Action: + - logs:CreateLogStream + - logs:CreateLogGroup + - logs:PutLogEvents + - logs:TagResource + - logs:UntagResource + - logs:DeleteLogGroup + - logs:PutRetentionPolicy + - logs:DescribeLogGroups + Resource: '*' + # Resource Prefix specific Actions + - Action: + - iam:CreateRole + - iam:AttachRolePolicy + - iam:DetachRolePolicy + - iam:PutRolePermissionsBoundary + - iam:TagRole + - iam:UntagRole + - iam:ListRoleTags + - iam:ListRolePolicies + - iam:GetRolePolicy + - iam:PutRolePolicy + - iam:ListAttachedRolePolicies + - iam:DeleteRole + - iam:ListInstanceProfiles + - iam:ListInstanceProfilesForRole + - iam:DeleteRolePolicy + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + Effect: Allow + - Action: + - iam:CreateInstanceProfile + - iam:DeleteInstanceProfile + - iam:AddRoleToInstanceProfile + - iam:RemoveRoleFromInstanceProfile + - iam:TagInstanceProfile + - iam:UntagInstanceProfile + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/* + Effect: Allow + - Action: + - iam:CreatePolicy + - iam:CreatePolicyVersion + - iam:DeletePolicyVersion + - iam:GetPolicyVersion + - iam:GetPolicy + - iam:DeletePolicy + - iam:ListInstanceProfiles + - iam:ListInstanceProfilesForRole + - iam:ListEntitiesForPolicy + - iam:ListPolicyVersions + - iam:TagPolicy + - iam:UntagPolicy + Resource: + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/*' + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-*' + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/*' + Effect: Allow + - Action: + - ec2:Describe* + - ec2:AllocateAddress + - ec2:AssociateAddress + - ec2:AttachNetworkInterface + - ec2:AuthorizeSecurityGroupEgress + - ec2:AuthorizeSecurityGroupIngress + - ec2:AttachVolume + - ec2:CreateLaunchTemplate + - ec2:CreateLaunchTemplateVersion + - ec2:CreateNetworkInterface + - ec2:CreatePlacementGroup + - ec2:CreateSecurityGroup + - ec2:CreateSnapshot + - ec2:CreateTags + - ec2:CreateVolume + - ec2:DeleteLaunchTemplate + - ec2:DeleteNetworkInterface + - ec2:DeletePlacementGroup + - ec2:DeleteSecurityGroup + - ec2:DeleteVolume + - ec2:DisassociateAddress + - ec2:ModifyLaunchTemplate + - ec2:ModifyNetworkInterfaceAttribute + - ec2:ModifyVolume + - ec2:ModifyVolumeAttribute + - ec2:ReleaseAddress + - ec2:RevokeSecurityGroupEgress + - ec2:RevokeSecurityGroupIngress + - ec2:RunInstances + - ec2:TerminateInstances + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: EC2Write + - Action: + - dynamodb:DescribeTable + - dynamodb:ListTagsOfResource + - dynamodb:CreateTable + - dynamodb:DeleteTable + - dynamodb:GetItem + - dynamodb:PutItem + - 
dynamodb:UpdateItem + - dynamodb:Query + - dynamodb:TagResource + Resource: !Sub arn:${AWS::Partition}:dynamodb:${Region}:${AWS::AccountId}:table/parallelcluster-* + Effect: Allow + Sid: DynamoDB + - Action: + - cloudwatch:PutDashboard + - cloudwatch:ListDashboards + - cloudwatch:DeleteDashboards + - cloudwatch:GetDashboard + Resource: '*' + Effect: Allow + Condition: !If + - IsMultiRegion + - !Ref AWS::NoValue + - StringEquals: + aws:RequestedRegion: + - !Ref Region + Sid: CloudWatch + - Action: + - iam:CreateServiceLinkedRole + - iam:DeleteServiceLinkedRole + - iam:AttachRolePolicy + - iam:PutRolePolicy + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/aws-service-role/* + Effect: Allow + + +Outputs: + ParallelClusterUserRole: + Value: !GetAtt ParallelClusterUserRole.Arn diff --git a/tests/integration-tests/configs/common/common.yaml b/tests/integration-tests/configs/common/common.yaml index 9e05cefe6a..1fbb92a8f7 100644 --- a/tests/integration-tests/configs/common/common.yaml +++ b/tests/integration-tests/configs/common/common.yaml @@ -294,6 +294,12 @@ iam: instances: {{ common.INSTANCES_DEFAULT_X86 }} oss: ["alinux2"] schedulers: ["slurm", "awsbatch"] + test_iam.py::test_iam_resource_prefix: + dimensions: + - regions: [ "eu-north-1" ] + instances: {{ common.INSTANCES_DEFAULT_X86 }} + oss: [ "alinux2" ] + schedulers: [ "slurm" ] intel_hpc: test_intel_hpc.py::test_intel_hpc: dimensions: diff --git a/tests/integration-tests/configs/pcluster3.yaml b/tests/integration-tests/configs/pcluster3.yaml index f39050eac8..c780931cb1 100644 --- a/tests/integration-tests/configs/pcluster3.yaml +++ b/tests/integration-tests/configs/pcluster3.yaml @@ -50,6 +50,12 @@ test-suites: instances: {{ common.INSTANCES_DEFAULT_X86 }} oss: ["alinux2"] schedulers: ["slurm"] + test_iam.py::test_iam_resource_prefix: + dimensions: + - regions: [ "eu-north-1" ] + schedulers: [ "slurm" ] + oss: [ "alinux2" ] + instances: {{ common.INSTANCES_DEFAULT_X86 }} schedulers: test_awsbatch.py::test_awsbatch: dimensions: diff --git a/tests/integration-tests/tests/iam/test_iam.py b/tests/integration-tests/tests/iam/test_iam.py index c8997cb451..bd7aae71cf 100644 --- a/tests/integration-tests/tests/iam/test_iam.py +++ b/tests/integration-tests/tests/iam/test_iam.py @@ -10,6 +10,7 @@ # This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. # See the License for the specific language governing permissions and limitations under the License. 
import logging +import os import os as os_lib from shutil import copyfile @@ -17,9 +18,12 @@ import pytest import yaml from assertpy import assert_that +from cfn_stacks_factory import CfnStack, CfnStacksFactory +from framework.credential_providers import register_cli_credentials_for_region +from framework.tests_configuration.config_utils import get_all_regions from remote_command_executor import RemoteCommandExecutor from s3_common_utils import check_s3_read_resource, check_s3_read_write_resource, get_policy_resources -from utils import wait_for_computefleet_changed +from utils import generate_stack_name, wait_for_computefleet_changed from tests.common.assertions import assert_no_errors_in_logs from tests.schedulers.test_awsbatch import _test_job_submission as _test_job_submission_awsbatch @@ -332,3 +336,134 @@ def test_s3_read_write_resource(region, pcluster_config_reader, s3_bucket_factor # Check S3 resources check_s3_read_resource(region, cluster, get_policy_resources(config, enable_write_access=False)) check_s3_read_write_resource(region, cluster, get_policy_resources(config, enable_write_access=True)) + + +@pytest.mark.parametrize("iam_resource_prefix", ["name-prefix-", "/path-prefix/", "/path-prefix/name-prefix-"]) +@pytest.mark.usefixtures("os", "instance") +def test_iam_resource_prefix( + register_resource_prefix_cli_credentials, + region, + pcluster_config_reader, + clusters_factory, + test_datadir, + scheduler_commands_factory, + s3_bucket_factory, + s3_bucket, + iam_resource_prefix, +): + bucket_name = s3_bucket + cfn_client, _, iam_client, _ = _create_boto3_clients(region) + + create_config, update_config = _get_config_create_and_update(test_datadir) + + cluster_config = pcluster_config_reader( + config_file=create_config, + min_count=1, + bucket=bucket_name, + iam_resource_prefix=iam_resource_prefix, + ) + cluster = clusters_factory(cluster_config) + _test_iam_resource_in_cluster(cfn_client, iam_client, cluster.name, iam_resource_prefix) + +def _split_resource_prefix(resource_prefix): + """To split Path and name prefix from Resource Prefix.""" + if resource_prefix: + split_index = resource_prefix.rfind("/") + 1 + return ( + None + if split_index == 0 + else resource_prefix + if split_index == len(resource_prefix) + else resource_prefix[:split_index], + None + if split_index == len(resource_prefix) + else resource_prefix + if split_index == 0 + else resource_prefix[split_index:], + ) + return None, None + + +def _check_iam_resource_prefix(resource_arn_list, iam_resource_prefix): + """Check the path and name of IAM resource ( Roles, policy and Instance profiles).""" + iam_path, iam_name_prefix = _split_resource_prefix(iam_resource_prefix) + for resource in resource_arn_list: + if "arn:aws:iam:" in resource: + if iam_path: + assert_that(resource).contains(iam_path) + else: + assert_that(resource).contains("/parallelcluster/") + if iam_name_prefix: + assert_that(resource).contains(iam_name_prefix) + + +def _test_iam_resource_in_cluster(cfn_client, iam_client, stack_name, iam_resource_prefix): + """Test IAM resources ( Roles, policy and Instance profiles).""" + + resources = cfn_client.describe_stack_resources(StackName=stack_name)["StackResources"] + resource_arn_list = [] + + for resource in resources: + resource_type = resource["ResourceType"] + if resource_type == "AWS::IAM::Role": + + resource_arn_list.append(iam_client.get_role(RoleName=resource["PhysicalResourceId"])["Role"]["Arn"]) + resource_arn_list.extend( + 
iam_client.list_role_policies(RoleName=resource["PhysicalResourceId"])["PolicyNames"] + ) + if resource_type == "AWS::IAM::InstanceProfile": + resource_arn_list.append( + iam_client.get_instance_profile(InstanceProfileName=resource["PhysicalResourceId"])["InstanceProfile"][ + "Arn" + ] + ) + _check_iam_resource_prefix(resource_arn_list, iam_resource_prefix) + + +@pytest.fixture(scope="class") +def initialize_resource_prefix_cli_creds(request): + """Create an IAM Role with Permission Boundary for testing Resource Prefix Feature.""" + + if request.config.getoption("use_default_iam_credentials"): + logging.info("Using default IAM credentials to run pcluster commands") + yield None + else: + stack_factory = CfnStacksFactory(request.config.getoption("credential")) + + regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config")) + stack_template_path = os.path.join("..", "iam_policies", "user-role-rp.cfn.yaml") + with open(stack_template_path, encoding="utf-8") as stack_template_file: + stack_template_data = stack_template_file.read() + cli_creds = {} + for region in regions: + if request.config.getoption("iam_user_role_stack_name"): + stack_name = request.config.getoption("iam_user_role_stack_name") + logging.info(f"Using stack {stack_name} in region {region}") + stack = CfnStack( + name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data + ) + else: + logging.info("Creating IAM roles for pcluster CLI") + stack_name = generate_stack_name( + "integ-tests-iam-rp-user-role", request.config.getoption("stackname_suffix") + ) + stack = CfnStack( + name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data + ) + + stack_factory.create_stack(stack) + cli_creds[region] = stack.cfn_outputs["ParallelClusterUserRole"] + + yield cli_creds + + if not request.config.getoption("no_delete"): + stack_factory.delete_all_stacks() + else: + logging.warning("Skipping deletion of CFN stacks because --no-delete option is set") + + +@pytest.fixture(scope="class") +def register_resource_prefix_cli_credentials(initialize_resource_prefix_cli_creds): + if initialize_resource_prefix_cli_creds: + for region, creds in initialize_resource_prefix_cli_creds.items(): + register_cli_credentials_for_region(region, creds) diff --git a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml new file mode 100644 index 0000000000..dc63cee553 --- /dev/null +++ b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml @@ -0,0 +1,49 @@ +Image: + Os: {{ os }} +Iam: + ResourcePrefix: {{ iam_resource_prefix}} +HeadNode: + InstanceType: {{ instance }} + Networking: + SubnetId: {{ public_subnet_id }} + Ssh: + KeyName: {{ key_name }} + Iam: + S3Access: + - BucketName: {{ bucket }} + KeyName: read_only/ + EnableWriteAccess: false + - BucketName: {{ bucket }} + KeyName: read_and_write/ + EnableWriteAccess: true +Scheduling: + Scheduler: {{ scheduler }} + SlurmQueues: + - Name: queue-0 + ComputeResources: + - Name: compute-resource-0 + InstanceType: {{ instance }} + MinCount: {{ min_count }} + Networking: + SubnetIds: + - {{ private_subnet_id }} + - Name: queue-1 + ComputeResources: + - Name: compute-resource-0 + InstanceType: {{ instance }} + MinCount: {{ min_count }} + Networking: + SubnetIds: + - {{ private_subnet_id }} + Iam: + S3Access: + - BucketName: {{ bucket }} + 
KeyName: read_only/
+        EnableWriteAccess: false
+      - BucketName: {{ bucket }}
+        KeyName: read_and_write/
+        EnableWriteAccess: true
+DevSettings:
+  Timeouts:
+    HeadNodeBootstrapTimeout: 1234 # timeout in seconds
+

From b95c3229267a5231a30df787483aff36185ce5d9 Mon Sep 17 00:00:00 2001
From: Himani Deshpande
Date: Mon, 5 Dec 2022 15:15:39 -0500
Subject: [PATCH 2/8] Remove file deleteS3.py

---
 tests/deleteS3.py | 37 -------------------------------------
 1 file changed, 37 deletions(-)
 delete mode 100644 tests/deleteS3.py

diff --git a/tests/deleteS3.py b/tests/deleteS3.py
deleted file mode 100644
index 298f967b8a..0000000000
--- a/tests/deleteS3.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import boto3
-
-client = boto3.client("s3", region_name="eu-north-1")
-response = client.list_buckets()
-# print(response['Buckets'])
-
-
-def delete_s3_bucket(bucket_name, region):
-    """
-    Delete an S3 bucket together with all stored objects.
-
-    :param bucket_name: name of the S3 bucket to delete
-    :param region: region of the bucket
-    """
-    try:
-        bucket = boto3.resource("s3", region_name=region).Bucket(bucket_name)
-        bucket.objects.all().delete()
-        bucket.object_versions.all().delete()
-        bucket.delete()
-    except boto3.client("s3").exceptions.NoSuchBucket:
-        pass
-
-
-for bucket in response["Buckets"]:
-    if "integ-tests-" in bucket["Name"]:
-
-        print(bucket["Name"])
-        delete_s3_bucket(bucket["Name"], "eu-north-1")
-
-# s3 = boto3.resource('s3')
-# s3_bucket = s3.Bucket(bucket['Name'])
-# bucket_versioning = s3.BucketVersioning(bucket['Name'])
-# if bucket_versioning.status == 'Enabled':
-#     s3_bucket.object_versions.delete()
-# else:
-#     s3_bucket.objects.all().delete()
-# response = client.delete_bucket(Bucket=bucket['Name'])

From 0308292bb209f0cbcd4f3333240c653a518eea16 Mon Sep 17 00:00:00 2001
From: Himani Deshpande
Date: Wed, 7 Dec 2022 16:14:24 -0500
Subject: [PATCH 3/8] Address PR review feedback

Update pcluster.config.yaml to remove the Iam section and use _inject_resource_in_config() to inject the Iam/ResourcePrefix section.
Change the scope of initialize_resource_prefix_cli_creds and register_resource_prefix_cli_credentials from class to the default (function) level.
Update _test_iam_resource_in_cluster() to also verify that the cluster was created successfully.
Update test_iam_resource_prefix to avoid duplicating the user role for each value of iam_resource_prefix_list and improve performance.
Move user-role-iam-resource-prefix.cfn.yaml into the tests folder.
Remove the unused update_config variable from test_iam_resource_prefix.

PR Link: https://github.com/aws/aws-parallelcluster/pull/4652
---
 tests/integration-tests/tests/iam/test_iam.py | 63 ++++++++++++++-----
 .../pcluster.config.yaml | 12 +---
 .../user-role-iam-resource-prefix.cfn.yaml} | 0
 3 files changed, 47 insertions(+), 28 deletions(-)
 rename tests/{iam_policies/user-role-rp.cfn.yaml => integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml} (100%)

diff --git a/tests/integration-tests/tests/iam/test_iam.py b/tests/integration-tests/tests/iam/test_iam.py
index bd7aae71cf..fc089b5608 100644
--- a/tests/integration-tests/tests/iam/test_iam.py
+++ b/tests/integration-tests/tests/iam/test_iam.py
@@ -10,7 +10,6 @@
 # This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
 # See the License for the specific language governing permissions and limitations under the License.
import logging -import os import os as os_lib from shutil import copyfile @@ -23,7 +22,7 @@ from framework.tests_configuration.config_utils import get_all_regions from remote_command_executor import RemoteCommandExecutor from s3_common_utils import check_s3_read_resource, check_s3_read_write_resource, get_policy_resources -from utils import generate_stack_name, wait_for_computefleet_changed +from utils import dict_add_nested_key, generate_stack_name, wait_for_computefleet_changed from tests.common.assertions import assert_no_errors_in_logs from tests.schedulers.test_awsbatch import _test_job_submission as _test_job_submission_awsbatch @@ -338,7 +337,6 @@ def test_s3_read_write_resource(region, pcluster_config_reader, s3_bucket_factor check_s3_read_write_resource(region, cluster, get_policy_resources(config, enable_write_access=True)) -@pytest.mark.parametrize("iam_resource_prefix", ["name-prefix-", "/path-prefix/", "/path-prefix/name-prefix-"]) @pytest.mark.usefixtures("os", "instance") def test_iam_resource_prefix( register_resource_prefix_cli_credentials, @@ -349,21 +347,22 @@ def test_iam_resource_prefix( scheduler_commands_factory, s3_bucket_factory, s3_bucket, - iam_resource_prefix, ): bucket_name = s3_bucket + iam_resource_prefix_list = ["name-prefix-", "/path-prefix/", "/path-prefix/name-prefix-"] cfn_client, _, iam_client, _ = _create_boto3_clients(region) - - create_config, update_config = _get_config_create_and_update(test_datadir) - - cluster_config = pcluster_config_reader( + create_config, _ = _get_config_create_and_update(test_datadir) + pcluster_config_reader( config_file=create_config, min_count=1, bucket=bucket_name, - iam_resource_prefix=iam_resource_prefix, ) - cluster = clusters_factory(cluster_config) - _test_iam_resource_in_cluster(cfn_client, iam_client, cluster.name, iam_resource_prefix) + + for iam_resource_prefix in iam_resource_prefix_list: + cluster_config = _update_config_with_iam_resource_prefix(test_datadir, iam_resource_prefix) + cluster = clusters_factory(cluster_config) + _test_iam_resource_in_cluster(cfn_client, iam_client, cluster.name, iam_resource_prefix) + def _split_resource_prefix(resource_prefix): """To split Path and name prefix from Resource Prefix.""" @@ -398,7 +397,13 @@ def _check_iam_resource_prefix(resource_arn_list, iam_resource_prefix): def _test_iam_resource_in_cluster(cfn_client, iam_client, stack_name, iam_resource_prefix): - """Test IAM resources ( Roles, policy and Instance profiles).""" + """Test IAM resources ( Roles, policy and Instance profiles) by checking the path and name prefix in AWS IAM and check cluster is created.""" + + # Check for cluster Status + + assert_that(cfn_client.describe_stacks(StackName=stack_name).get("Stacks")[0].get("StackStatus")).is_equal_to( + "CREATE_COMPLETE" + ) resources = cfn_client.describe_stack_resources(StackName=stack_name)["StackResources"] resource_arn_list = [] @@ -420,10 +425,9 @@ def _test_iam_resource_in_cluster(cfn_client, iam_client, stack_name, iam_resour _check_iam_resource_prefix(resource_arn_list, iam_resource_prefix) -@pytest.fixture(scope="class") -def initialize_resource_prefix_cli_creds(request): +@pytest.fixture() +def initialize_resource_prefix_cli_creds(request,test_datadir): """Create an IAM Role with Permission Boundary for testing Resource Prefix Feature.""" - if request.config.getoption("use_default_iam_credentials"): logging.info("Using default IAM credentials to run pcluster commands") yield None @@ -431,7 +435,7 @@ def initialize_resource_prefix_cli_creds(request): 
stack_factory = CfnStacksFactory(request.config.getoption("credential")) regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config")) - stack_template_path = os.path.join("..", "iam_policies", "user-role-rp.cfn.yaml") + stack_template_path = os_lib.path.join("..", "iam_policies", test_datadir/"user-role-iam-resource-prefix.cfn.yaml") with open(stack_template_path, encoding="utf-8") as stack_template_file: stack_template_data = stack_template_file.read() cli_creds = {} @@ -462,8 +466,33 @@ def initialize_resource_prefix_cli_creds(request): logging.warning("Skipping deletion of CFN stacks because --no-delete option is set") -@pytest.fixture(scope="class") +@pytest.fixture() def register_resource_prefix_cli_credentials(initialize_resource_prefix_cli_creds): + """Register the credentials for creating a cluster.""" if initialize_resource_prefix_cli_creds: for region, creds in initialize_resource_prefix_cli_creds.items(): register_cli_credentials_for_region(region, creds) + + +def _update_config_with_iam_resource_prefix( + test_datadir, + iam_resource_prefix, + output_file=None, + config_file="pcluster.config.yaml", +): + """Update the config file with iam resource prefix.""" + config_file_path = test_datadir / config_file + if not os_lib.path.isfile(config_file_path): + raise FileNotFoundError(f"Cluster config file not found in the expected dir {config_file_path}") + output_file_path = test_datadir / output_file if output_file else config_file_path + _inject_resource_in_config(output_file_path, iam_resource_prefix, ("Iam", "ResourcePrefix")) + return output_file_path + + +def _inject_resource_in_config(cluster_config, resource_value, resource_keys): + """Injects cluster config file with a given resource key-value.""" + with open(cluster_config, encoding="utf-8") as conf_file: + config_content = yaml.load(conf_file, Loader=yaml.SafeLoader) + dict_add_nested_key(config_content, resource_value, resource_keys) + with open(cluster_config, "w", encoding="utf-8") as conf_file: + yaml.dump(config_content, conf_file) diff --git a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml index dc63cee553..f58701c635 100644 --- a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml +++ b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml @@ -1,7 +1,5 @@ Image: Os: {{ os }} -Iam: - ResourcePrefix: {{ iam_resource_prefix}} HeadNode: InstanceType: {{ instance }} Networking: @@ -10,9 +8,6 @@ HeadNode: KeyName: {{ key_name }} Iam: S3Access: - - BucketName: {{ bucket }} - KeyName: read_only/ - EnableWriteAccess: false - BucketName: {{ bucket }} KeyName: read_and_write/ EnableWriteAccess: true @@ -37,13 +32,8 @@ Scheduling: - {{ private_subnet_id }} Iam: S3Access: - - BucketName: {{ bucket }} - KeyName: read_only/ - EnableWriteAccess: false - BucketName: {{ bucket }} KeyName: read_and_write/ EnableWriteAccess: true -DevSettings: - Timeouts: - HeadNodeBootstrapTimeout: 1234 # timeout in seconds + diff --git a/tests/iam_policies/user-role-rp.cfn.yaml b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml similarity index 100% rename from tests/iam_policies/user-role-rp.cfn.yaml rename to tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml From 
38668aa3fab6442030ef40f2852285be1d57dd7b Mon Sep 17 00:00:00 2001 From: Himani Deshpande Date: Fri, 9 Dec 2022 00:52:08 -0500 Subject: [PATCH 4/8] Change Iam Resource Prefix tests to handle one test case Add Iam/ResourcePrefix in pcluster.config.yaml Change user-role-iam-resource-prefix.cfn.yaml to handle /path-prefix/name-prefix- Iam Resource Prefix Add /path-prefix/name-prefix as the only pytest parameter and remove use_default_iam_credentials parameter check condition from initialize_resource_prefix_cli_creds --- tests/integration-tests/tests/iam/test_iam.py | 90 +++++------ .../pcluster.config.yaml | 2 + .../user-role-iam-resource-prefix.cfn.yaml | 143 +++++++++++------- 3 files changed, 128 insertions(+), 107 deletions(-) diff --git a/tests/integration-tests/tests/iam/test_iam.py b/tests/integration-tests/tests/iam/test_iam.py index fc089b5608..53fbd04934 100644 --- a/tests/integration-tests/tests/iam/test_iam.py +++ b/tests/integration-tests/tests/iam/test_iam.py @@ -336,7 +336,7 @@ def test_s3_read_write_resource(region, pcluster_config_reader, s3_bucket_factor check_s3_read_resource(region, cluster, get_policy_resources(config, enable_write_access=False)) check_s3_read_write_resource(region, cluster, get_policy_resources(config, enable_write_access=True)) - +@pytest.mark.parametrize("iam_resource_prefix",[ "/path-prefix/name-prefix-"]) @pytest.mark.usefixtures("os", "instance") def test_iam_resource_prefix( register_resource_prefix_cli_credentials, @@ -347,21 +347,20 @@ def test_iam_resource_prefix( scheduler_commands_factory, s3_bucket_factory, s3_bucket, + iam_resource_prefix, ): bucket_name = s3_bucket - iam_resource_prefix_list = ["name-prefix-", "/path-prefix/", "/path-prefix/name-prefix-"] cfn_client, _, iam_client, _ = _create_boto3_clients(region) create_config, _ = _get_config_create_and_update(test_datadir) - pcluster_config_reader( + cluster_config = pcluster_config_reader( config_file=create_config, min_count=1, bucket=bucket_name, + iam_resource_prefix= iam_resource_prefix ) - for iam_resource_prefix in iam_resource_prefix_list: - cluster_config = _update_config_with_iam_resource_prefix(test_datadir, iam_resource_prefix) - cluster = clusters_factory(cluster_config) - _test_iam_resource_in_cluster(cfn_client, iam_client, cluster.name, iam_resource_prefix) + cluster = clusters_factory(cluster_config) + _test_iam_resource_in_cluster(cfn_client, iam_client, cluster.name, iam_resource_prefix) def _split_resource_prefix(resource_prefix): @@ -428,42 +427,38 @@ def _test_iam_resource_in_cluster(cfn_client, iam_client, stack_name, iam_resour @pytest.fixture() def initialize_resource_prefix_cli_creds(request,test_datadir): """Create an IAM Role with Permission Boundary for testing Resource Prefix Feature.""" - if request.config.getoption("use_default_iam_credentials"): - logging.info("Using default IAM credentials to run pcluster commands") - yield None - else: - stack_factory = CfnStacksFactory(request.config.getoption("credential")) - - regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config")) - stack_template_path = os_lib.path.join("..", "iam_policies", test_datadir/"user-role-iam-resource-prefix.cfn.yaml") - with open(stack_template_path, encoding="utf-8") as stack_template_file: - stack_template_data = stack_template_file.read() - cli_creds = {} - for region in regions: - if request.config.getoption("iam_user_role_stack_name"): - stack_name = request.config.getoption("iam_user_role_stack_name") - logging.info(f"Using stack 
{stack_name} in region {region}") - stack = CfnStack( - name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data - ) - else: - logging.info("Creating IAM roles for pcluster CLI") - stack_name = generate_stack_name( - "integ-tests-iam-rp-user-role", request.config.getoption("stackname_suffix") - ) - stack = CfnStack( - name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data - ) + stack_factory = CfnStacksFactory(request.config.getoption("credential")) + + regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config")) + stack_template_path = os_lib.path.join("..", "iam_policies", test_datadir/"user-role-iam-resource-prefix.cfn.yaml") + with open(stack_template_path, encoding="utf-8") as stack_template_file: + stack_template_data = stack_template_file.read() + cli_creds = {} + for region in regions: + if request.config.getoption("iam_user_role_stack_name"): + stack_name = request.config.getoption("iam_user_role_stack_name") + logging.info(f"Using stack {stack_name} in region {region}") + stack = CfnStack( + name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data + ) + else: + logging.info("Creating IAM roles for pcluster CLI") + stack_name = generate_stack_name( + "integ-tests-iam-rp-user-role", request.config.getoption("stackname_suffix") + ) + stack = CfnStack( + name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data + ) - stack_factory.create_stack(stack) - cli_creds[region] = stack.cfn_outputs["ParallelClusterUserRole"] + stack_factory.create_stack(stack) + cli_creds[region] = stack.cfn_outputs["ParallelClusterUserRole"] - yield cli_creds + yield cli_creds - if not request.config.getoption("no_delete"): - stack_factory.delete_all_stacks() - else: - logging.warning("Skipping deletion of CFN stacks because --no-delete option is set") + if not request.config.getoption("no_delete"): + stack_factory.delete_all_stacks() + else: + logging.warning("Skipping deletion of CFN stacks because --no-delete option is set") @pytest.fixture() @@ -474,21 +469,6 @@ def register_resource_prefix_cli_credentials(initialize_resource_prefix_cli_cred register_cli_credentials_for_region(region, creds) -def _update_config_with_iam_resource_prefix( - test_datadir, - iam_resource_prefix, - output_file=None, - config_file="pcluster.config.yaml", -): - """Update the config file with iam resource prefix.""" - config_file_path = test_datadir / config_file - if not os_lib.path.isfile(config_file_path): - raise FileNotFoundError(f"Cluster config file not found in the expected dir {config_file_path}") - output_file_path = test_datadir / output_file if output_file else config_file_path - _inject_resource_in_config(output_file_path, iam_resource_prefix, ("Iam", "ResourcePrefix")) - return output_file_path - - def _inject_resource_in_config(cluster_config, resource_value, resource_keys): """Injects cluster config file with a given resource key-value.""" with open(cluster_config, encoding="utf-8") as conf_file: diff --git a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml index f58701c635..a98e4cca97 100644 --- a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml +++ b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/pcluster.config.yaml @@ -1,5 +1,7 
@@ Image: Os: {{ os }} +Iam: + ResourcePrefix: {{ iam_resource_prefix }} HeadNode: InstanceType: {{ instance }} Networking: diff --git a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml index 7547afe72c..20389b8d1a 100644 --- a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml +++ b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml @@ -86,17 +86,21 @@ Resources: - iam:DeleteRole - iam:TagRole Resource: - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* Effect: Allow Sid: IamRole - Action: - iam:CreateRole Resource: - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* Effect: Allow Condition: !If - EnablePermissionsBoundary @@ -108,10 +112,12 @@ Resources: - Action: - iam:PutRolePolicy - iam:DeleteRolePolicy - Resource: - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* Effect: Allow Sid: IamInlinePolicy Condition: !If @@ -123,16 +129,19 @@ Resources: - Action: - iam:AttachRolePolicy - iam:DetachRolePolicy - Resource: - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + Resource: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* +# - !Sub 
arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* Condition: ArnLike: iam:PolicyARN: - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-* - !Sub arn:${AWS::Partition}:iam::aws:policy/CloudWatchAgentServerPolicy - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore - !Sub arn:${AWS::Partition}:iam::aws:policy/AWSBatchFullAccess @@ -164,10 +173,13 @@ Resources: - iam:GetRole - iam:PassRole Resource: - - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*' - - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*' - - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*' - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* +# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*' +# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*' +# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*' +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* Effect: Allow Condition: StringEqualsIfExists: @@ -380,18 +392,24 @@ Resources: - iam:SimulatePrincipalPolicy - iam:GetInstanceProfile Resource: - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/parallelcluster/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-* +# Some of the Inline ParallelCluster Policies dont have a path - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/name-prefix-* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/name-prefix-* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/name-prefix-* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* +# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/* +# - !Sub 
arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/parallelcluster/*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/name-prefix-*
 Effect: Allow
 Sid: IamRead
 - Action:
@@ -400,18 +418,26 @@ Resources:
 - iam:AddRoleToInstanceProfile
 - iam:RemoveRoleFromInstanceProfile
 Resource:
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/parallelcluster/*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/name-prefix-*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/parallelcluster/*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*
 Effect: Allow
 Sid: IamInstanceProfile
 - Action:
 - iam:GetRole
 - iam:PassRole
 Resource:
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
+# Some of the CleanupRoles have naming convention of /path-prefix/name-prefix-*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-*
+# Some of the Roles like HN and CN have naming convention of /path-prefix/{cluster_name}/name-prefix-*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
+
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
 Effect: Allow
 Condition:
 StringEqualsIfExists:
@@ -571,10 +597,14 @@ Resources:
 - iam:GetRole
 - iam:PassRole
 Resource:
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
+# - !Sub
arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-*
 Effect: Allow
 - Action:
 - cloudformation:*
@@ -678,10 +708,14 @@ Resources:
 - iam:ListInstanceProfiles
 - iam:ListInstanceProfilesForRole
 - iam:DeleteRolePolicy
- Resource:
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
+ Resource:
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
 Effect: Allow
 - Action:
 - iam:CreateInstanceProfile
@@ -691,10 +725,14 @@
 - iam:TagInstanceProfile
 - iam:UntagInstanceProfile
 Resource:
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/name-prefix-*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*
 Effect: Allow
 - Action:
 - iam:CreatePolicy
@@ -710,9 +748,10 @@
 - iam:TagPolicy
 - iam:UntagPolicy
 Resource:
- - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/*'
- - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-*'
- - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/*'
+ - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-*'
+ # - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/*'
+# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-*'
+# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/*'
 Effect: Allow
 - Action:
 - ec2:Describe*

From 204552df62570bd5a364e67e62e1f8a7507ca396 Mon Sep 17 00:00:00 2001
From: Himani Deshpande
Date: Mon, 12 Dec 2022 21:33:24 -0500
Subject: [PATCH 5/8] Add test-specific CLI credentials to cluster creation

Pass test-specific CLI credentials when creating clusters and running
pcluster commands.
Change ClustersFactory's create_cluster() to accept test-specific CLI
credentials as an argument.
Change user-role-iam-resource-prefix.cfn.yaml to handle the
/path-prefix/name-prefix- IAM resource prefix.
Remove register_resource_prefix_cli_credentials, as it would affect
parallel tests running in the same region.
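For illustration, the intended flow is sketched below. This is a condensed,
hypothetical example rather than the committed test: the fixture wiring is
simplified, and in the real run the role ARNs come from the CloudFormation
stack output instead of a local variable.

    # Sketch, assuming the fixtures defined in test_iam.py and conftest.py:
    cli_credentials = initialize_resource_prefix_cli_creds(test_datadir)  # {region: role_arn}
    for region, creds in cli_credentials.items():
        cluster_config = pcluster_config_reader(
            config_file=create_config,
            min_count=1,
            bucket=bucket_name,
            iam_resource_prefix="/path-prefix/name-prefix-",
        )
        # Every pcluster command issued for this cluster then assumes the role:
        cluster = clusters_factory(cluster_config, custom_cli_credentials=creds)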
--- tests/integration-tests/clusters_factory.py | 48 ++++++---- tests/integration-tests/conftest.py | 1 + .../framework/credential_providers.py | 11 ++- tests/integration-tests/tests/iam/test_iam.py | 93 +++++++++---------- .../user-role-iam-resource-prefix.cfn.yaml | 70 ++------------ tests/integration-tests/utils.py | 11 ++- 6 files changed, 102 insertions(+), 132 deletions(-) diff --git a/tests/integration-tests/clusters_factory.py b/tests/integration-tests/clusters_factory.py index 16c57930d4..2585951484 100644 --- a/tests/integration-tests/clusters_factory.py +++ b/tests/integration-tests/clusters_factory.py @@ -44,7 +44,7 @@ def wrapper(*args, **kwargs): class Cluster: """Contain all static and dynamic data related to a cluster instance.""" - def __init__(self, name, ssh_key, config_file, region): + def __init__(self, name, ssh_key, config_file, region, custom_cli_credentials=None): self.name = name self.config_file = config_file self.ssh_key = ssh_key @@ -57,6 +57,7 @@ def __init__(self, name, ssh_key, config_file, region): self.__cfn_outputs = None self.__cfn_resources = None self.__cfn_stack_arn = None + self.custom_cli_credentials = custom_cli_credentials def __repr__(self): attrs = ", ".join(["{key}={value}".format(key=key, value=repr(value)) for key, value in self.__dict__.items()]) @@ -89,7 +90,12 @@ def update(self, config_file, raise_on_error=True, log_error=True, **kwargs): # TODO Remove the validator suppression below once the plugin scheduler is officially supported if self.config["Scheduling"]["Scheduler"] == "plugin": command.extend(["--suppress-validators", "type:SchedulerValidator"]) - result = run_pcluster_command(command, raise_on_error=raise_on_error, log_error=log_error) + result = run_pcluster_command( + command, + raise_on_error=raise_on_error, + log_error=log_error, + custom_cli_credentials=self.custom_cli_credentials, + ) logging.info("update-cluster response: %s", result.stdout) response = json.loads(result.stdout) if response.get("cloudFormationStackStatus") != "UPDATE_COMPLETE": @@ -130,7 +136,7 @@ def delete(self, delete_logs=False): logging.warning("CloudWatch logs for cluster %s are preserved due to failure.", self.name) try: self.cfn_stack_arn # Cache cfn_stack_arn attribute before stack deletion - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) if "DELETE_FAILED" in result.stdout: error = "Cluster deletion failed for {0} with output: {1}".format(self.name, result.stdout) logging.error(error) @@ -153,7 +159,7 @@ def start(self): else: # slurm and scheduler plugin case cmd_args.append("START_REQUESTED") try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) logging.info("Cluster {0} started successfully".format(self.name)) return result.stdout except subprocess.CalledProcessError as e: @@ -169,7 +175,7 @@ def stop(self): else: # slurm and scheduler plugin case cmd_args.append("STOP_REQUESTED") try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) logging.info("Cluster {0} stopped successfully".format(self.name)) return result.stdout except subprocess.CalledProcessError as e: @@ -180,7 +186,7 @@ def describe_cluster(self): """Run pcluster describe-cluster and return the result.""" cmd_args = 
["pcluster", "describe-cluster", "--cluster-name", self.name] try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Get cluster {0} status successfully".format(self.name)) return response @@ -192,7 +198,7 @@ def describe_compute_fleet(self): """Run pcluster describe-compute-fleet and return the result.""" cmd_args = ["pcluster", "describe-compute-fleet", "--cluster-name", self.name] try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Describe cluster %s compute fleet successfully", self.name) return response @@ -216,7 +222,7 @@ def describe_cluster_instances(self, node_type=None, queue_name=None): if queue_name: cmd_args.extend(["--queue-name", queue_name]) try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Get cluster {0} instances successfully".format(self.name)) return response["instances"] @@ -239,7 +245,7 @@ def export_logs(self, bucket, output_file=None, bucket_prefix=None, filters=None if filters: cmd_args += ["--filters", filters] try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Cluster's logs exported successfully") return response @@ -253,7 +259,7 @@ def list_log_streams(self, next_token=None): if next_token: cmd_args.extend(["--next-token", next_token]) try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Cluster's logs listed successfully") return response @@ -281,7 +287,7 @@ def get_log_events(self, log_stream, **args): cmd_args.extend([f"--{kebab_case(k)}", str(val)]) try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Log events retrieved successfully") return response @@ -296,7 +302,7 @@ def get_stack_events(self, **args): cmd_args.extend([f"--{kebab_case(k)}", str(val)]) try: - result = run_pcluster_command(cmd_args, log_error=False) + result = run_pcluster_command(cmd_args, log_error=False, custom_cli_credentials=self.custom_cli_credentials) response = json.loads(result.stdout) logging.info("Stack events retrieved successfully") return response @@ -399,9 +405,10 @@ def _delete_volumes(self): class ClustersFactory: """Manage creation and destruction of pcluster clusters.""" - def __init__(self, delete_logs_on_success=False): + def __init__(self, delete_logs_on_success=False, custom_cli_credentials=None): self.__created_clusters = {} self._delete_logs_on_success = delete_logs_on_success + self.custom_cli_credentials = custom_cli_credentials def create_cluster(self, cluster, log_error=True, raise_on_error=True, **kwargs): """ @@ -417,10 +424,12 @@ def create_cluster(self, cluster, log_error=True, raise_on_error=True, **kwargs) # create the cluster 
logging.info("Creating cluster {0} with config {1}".format(name, cluster.config_file)) + self.custom_cli_credentials = kwargs.get("custom_cli_credentials") command, wait = self._build_command(cluster, kwargs) try: - result = run_pcluster_command(command, timeout=7200, raise_on_error=raise_on_error, log_error=log_error) - + result = run_pcluster_command( + command, timeout=7200, raise_on_error=raise_on_error, log_error=log_error, **kwargs + ) logging.info("create-cluster response: %s", result.stdout) response = json.loads(result.stdout) if wait: @@ -470,10 +479,11 @@ def _build_command(cluster, kwargs): kwargs["suppress_validators"] = validators_list for k, val in kwargs.items(): - if isinstance(val, (list, tuple)): - command.extend([f"--{kebab_case(k)}"] + list(map(str, val))) - else: - command.extend([f"--{kebab_case(k)}", str(val)]) + if k != "custom_cli_credentials": + if isinstance(val, (list, tuple)): + command.extend([f"--{kebab_case(k)}"] + list(map(str, val))) + else: + command.extend([f"--{kebab_case(k)}", str(val)]) return command, wait diff --git a/tests/integration-tests/conftest.py b/tests/integration-tests/conftest.py index b1e8cc21fb..8ae06d9d9d 100644 --- a/tests/integration-tests/conftest.py +++ b/tests/integration-tests/conftest.py @@ -386,6 +386,7 @@ def _cluster_factory(cluster_config, upper_case_cluster_name=False, **kwargs): config_file=cluster_config, ssh_key=request.config.getoption("key_path"), region=region, + custom_cli_credentials=kwargs.get("custom_cli_credentials"), ) if not request.config.getoption("cluster"): cluster.creation_response = factory.create_cluster(cluster, **kwargs) diff --git a/tests/integration-tests/framework/credential_providers.py b/tests/integration-tests/framework/credential_providers.py index c281678b1f..c81d28cc11 100644 --- a/tests/integration-tests/framework/credential_providers.py +++ b/tests/integration-tests/framework/credential_providers.py @@ -25,15 +25,20 @@ def register_cli_credentials_for_region(region, iam_role): def run_pcluster_command(*args, **kwargs): """Run a command after assuming the role configured through register_cli_credentials_for_region.""" + region = kwargs.get("region") if not region: region = os.environ["AWS_DEFAULT_REGION"] - if region in cli_credentials: - with sts_credential_provider(region, cli_credentials[region]): + if kwargs.get("custom_cli_credentials"): + with sts_credential_provider(region, credential_arn=kwargs.get("custom_cli_credentials")): return run_command(*args, **kwargs) else: - return run_command(*args, **kwargs) + if region in cli_credentials: + with sts_credential_provider(region, cli_credentials[region]): + return run_command(*args, **kwargs) + else: + return run_command(*args, **kwargs) @contextmanager diff --git a/tests/integration-tests/tests/iam/test_iam.py b/tests/integration-tests/tests/iam/test_iam.py index 53fbd04934..3c77dfa5d3 100644 --- a/tests/integration-tests/tests/iam/test_iam.py +++ b/tests/integration-tests/tests/iam/test_iam.py @@ -18,7 +18,6 @@ import yaml from assertpy import assert_that from cfn_stacks_factory import CfnStack, CfnStacksFactory -from framework.credential_providers import register_cli_credentials_for_region from framework.tests_configuration.config_utils import get_all_regions from remote_command_executor import RemoteCommandExecutor from s3_common_utils import check_s3_read_resource, check_s3_read_write_resource, get_policy_resources @@ -336,11 +335,11 @@ def test_s3_read_write_resource(region, pcluster_config_reader, s3_bucket_factor 
check_s3_read_resource(region, cluster, get_policy_resources(config, enable_write_access=False)) check_s3_read_write_resource(region, cluster, get_policy_resources(config, enable_write_access=True)) -@pytest.mark.parametrize("iam_resource_prefix",[ "/path-prefix/name-prefix-"]) + +@pytest.mark.parametrize("iam_resource_prefix", ["/path-prefix/name-prefix-"]) @pytest.mark.usefixtures("os", "instance") def test_iam_resource_prefix( - register_resource_prefix_cli_credentials, - region, + initialize_resource_prefix_cli_creds, pcluster_config_reader, clusters_factory, test_datadir, @@ -349,18 +348,19 @@ def test_iam_resource_prefix( s3_bucket, iam_resource_prefix, ): - bucket_name = s3_bucket - cfn_client, _, iam_client, _ = _create_boto3_clients(region) - create_config, _ = _get_config_create_and_update(test_datadir) - cluster_config = pcluster_config_reader( - config_file=create_config, - min_count=1, - bucket=bucket_name, - iam_resource_prefix= iam_resource_prefix - ) + cli_credentials = initialize_resource_prefix_cli_creds(test_datadir) + if cli_credentials: + for region, creds in cli_credentials.items(): + + bucket_name = s3_bucket + cfn_client, _, iam_client, _ = _create_boto3_clients(region) + create_config, _ = _get_config_create_and_update(test_datadir) + cluster_config = pcluster_config_reader( + config_file=create_config, min_count=1, bucket=bucket_name, iam_resource_prefix=iam_resource_prefix + ) - cluster = clusters_factory(cluster_config) - _test_iam_resource_in_cluster(cfn_client, iam_client, cluster.name, iam_resource_prefix) + cluster = clusters_factory(cluster_config, custom_cli_credentials=creds) + _test_iam_resource_in_cluster(cfn_client, iam_client, cluster.name, iam_resource_prefix) def _split_resource_prefix(resource_prefix): @@ -396,7 +396,7 @@ def _check_iam_resource_prefix(resource_arn_list, iam_resource_prefix): def _test_iam_resource_in_cluster(cfn_client, iam_client, stack_name, iam_resource_prefix): - """Test IAM resources ( Roles, policy and Instance profiles) by checking the path and name prefix in AWS IAM and check cluster is created.""" + """Test IAM resources by checking the path and name prefix in AWS IAM and check cluster is created.""" # Check for cluster Status @@ -424,36 +424,39 @@ def _test_iam_resource_in_cluster(cfn_client, iam_client, stack_name, iam_resour _check_iam_resource_prefix(resource_arn_list, iam_resource_prefix) -@pytest.fixture() -def initialize_resource_prefix_cli_creds(request,test_datadir): +@pytest.fixture(scope="class") +def initialize_resource_prefix_cli_creds(request): """Create an IAM Role with Permission Boundary for testing Resource Prefix Feature.""" + stack_factory = CfnStacksFactory(request.config.getoption("credential")) - regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config")) - stack_template_path = os_lib.path.join("..", "iam_policies", test_datadir/"user-role-iam-resource-prefix.cfn.yaml") - with open(stack_template_path, encoding="utf-8") as stack_template_file: - stack_template_data = stack_template_file.read() - cli_creds = {} - for region in regions: - if request.config.getoption("iam_user_role_stack_name"): - stack_name = request.config.getoption("iam_user_role_stack_name") - logging.info(f"Using stack {stack_name} in region {region}") - stack = CfnStack( - name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data - ) - else: - logging.info("Creating IAM roles for pcluster CLI") - stack_name = generate_stack_name( - 
"integ-tests-iam-rp-user-role", request.config.getoption("stackname_suffix") - ) - stack = CfnStack( - name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data - ) + def _create_resource_prefix_cli_creds(test_datadir): + regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config")) + stack_template_path = os_lib.path.join("..", test_datadir / "user-role-iam-resource-prefix.cfn.yaml") + with open(stack_template_path, encoding="utf-8") as stack_template_file: + stack_template_data = stack_template_file.read() + cli_creds = {} + for region in regions: + if request.config.getoption("iam_user_role_stack_name"): + stack_name = request.config.getoption("iam_user_role_stack_name") + logging.info(f"Using stack {stack_name} in region {region}") + stack = CfnStack( + name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data + ) + else: + logging.info("Creating IAM roles for pcluster CLI") + stack_name = generate_stack_name( + "integ-tests-iam-rp-user-role", request.config.getoption("stackname_suffix") + ) + stack = CfnStack( + name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data + ) - stack_factory.create_stack(stack) - cli_creds[region] = stack.cfn_outputs["ParallelClusterUserRole"] + stack_factory.create_stack(stack) + cli_creds[region] = stack.cfn_outputs["ParallelClusterUserRole"] + return cli_creds - yield cli_creds + yield _create_resource_prefix_cli_creds if not request.config.getoption("no_delete"): stack_factory.delete_all_stacks() @@ -461,14 +464,6 @@ def initialize_resource_prefix_cli_creds(request,test_datadir): logging.warning("Skipping deletion of CFN stacks because --no-delete option is set") -@pytest.fixture() -def register_resource_prefix_cli_credentials(initialize_resource_prefix_cli_creds): - """Register the credentials for creating a cluster.""" - if initialize_resource_prefix_cli_creds: - for region, creds in initialize_resource_prefix_cli_creds.items(): - register_cli_credentials_for_region(region, creds) - - def _inject_resource_in_config(cluster_config, resource_value, resource_keys): """Injects cluster config file with a given resource key-value.""" with open(cluster_config, encoding="utf-8") as conf_file: diff --git a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml index 20389b8d1a..b0ab6cb067 100644 --- a/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml +++ b/tests/integration-tests/tests/iam/test_iam/test_iam_resource_prefix/user-role-iam-resource-prefix.cfn.yaml @@ -88,9 +88,6 @@ Resources: Resource: - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* Effect: Allow Sid: IamRole - Action: @@ -98,9 +95,6 @@ Resources: Resource: - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* -# - !Sub 
arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
 Effect: Allow
 Condition: !If
@@ -115,9 +109,6 @@ Resources:
 Resource:
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-*
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
 Effect: Allow
 Sid: IamInlinePolicy
 Condition: !If
@@ -132,16 +123,11 @@ Resources:
 Resource:
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-*
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
 Condition:
 ArnLike:
 iam:PolicyARN:
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/name-prefix-*
 - !Sub arn:${AWS::Partition}:iam::aws:policy/CloudWatchAgentServerPolicy
 - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore
 - !Sub arn:${AWS::Partition}:iam::aws:policy/AWSBatchFullAccess
@@ -175,11 +161,6 @@ Resources:
 Resource:
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-*
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
-# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*'
-# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*'
-# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*'
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-*
 Effect: Allow
 Condition:
 StringEqualsIfExists:
@@ -394,22 +375,10 @@ Resources:
 Resource:
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-*
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-*
- - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-*
+# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-*
# Some of the Inline ParallelCluster Policies don't have a path
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/name-prefix-*
 - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/parallelcluster/*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
-# - !Sub
arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/name-prefix-* Effect: Allow Sid: IamRead - Action: @@ -419,10 +388,7 @@ Resources: - iam:RemoveRoleFromInstanceProfile Resource: - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/name-prefix-* - - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/parallelcluster/* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/* Effect: Allow Sid: IamInstanceProfile - Action: @@ -435,9 +401,7 @@ Resources: - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + Effect: Allow Condition: StringEqualsIfExists: @@ -600,11 +564,6 @@ Resources: - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* Effect: Allow - Action: - cloudformation:* @@ -711,11 +670,7 @@ Resources: Resource: - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/parallelcluster/* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-* -# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/* + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/name-prefix-* Effect: Allow - Action: - iam:CreateInstanceProfile @@ -728,11 +683,7 @@ Resources: - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/name-prefix-* - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*/name-prefix-* - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/name-prefix-* - - !Sub 
arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/path-prefix/*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*/name-prefix-*
-# - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*
+ - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/path-prefix/*
 Effect: Allow
 - Action:
 - iam:CreatePolicy
@@ -748,10 +699,9 @@
 - iam:TagPolicy
 - iam:UntagPolicy
 Resource:
- - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-*'
- # - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/parallelcluster/*'
-# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/*/name-prefix-*'
-# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/*'
+# - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/path-prefix/name-prefix-*'
+ - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/name-prefix-*'
+
 Effect: Allow
 - Action:
 - ec2:Describe*
diff --git a/tests/integration-tests/utils.py b/tests/integration-tests/utils.py
index f404976d88..2a89c3fbcc 100644
--- a/tests/integration-tests/utils.py
+++ b/tests/integration-tests/utils.py
@@ -81,7 +81,16 @@ def retry_if_subprocess_error(exception):
     return isinstance(exception, subprocess.CalledProcessError)

-def run_command(command, capture_output=True, log_error=True, env=None, timeout=None, raise_on_error=True, shell=False):
+def run_command(
+    command,
+    capture_output=True,
+    log_error=True,
+    env=None,
+    timeout=None,
+    raise_on_error=True,
+    shell=False,
+    custom_cli_credentials=None,
+):
     """Execute shell command."""
     if isinstance(command, str) and not shell:
         command = shlex.split(command)

From 0f79232285de0bd9383b2fd175721922932cc2b3 Mon Sep 17 00:00:00 2001
From: Himani Deshpande
Date: Thu, 15 Dec 2022 14:56:46 -0500
Subject: [PATCH 6/8] Remove unnecessary methods and keyword arguments

Remove _inject_resource_in_config(), which is unused in this PR (it is
part of another PR).
Revert run_command() to its original definition.
Update run_pcluster_command() to handle credential switching and the
KeyError for credential_arn.
Remove unnecessary keyword arguments passed from create_cluster() to
run_pcluster_command().

Reference PR: https://github.com/aws/aws-parallelcluster/pull/4652
---
 tests/integration-tests/clusters_factory.py   |  6 +++++-
 .../framework/credential_providers.py         |  9 ++++-----
 tests/integration-tests/tests/iam/test_iam.py | 11 +----------
 tests/integration-tests/utils.py              |  1 -
 4 files changed, 10 insertions(+), 17 deletions(-)

diff --git a/tests/integration-tests/clusters_factory.py b/tests/integration-tests/clusters_factory.py
index 2585951484..4e99422428 100644
--- a/tests/integration-tests/clusters_factory.py
+++ b/tests/integration-tests/clusters_factory.py
@@ -428,7 +428,11 @@ def create_cluster(self, cluster, log_error=True, raise_on_error=True, **kwargs)
         command, wait = self._build_command(cluster, kwargs)
         try:
             result = run_pcluster_command(
-                command, timeout=7200, raise_on_error=raise_on_error, log_error=log_error, **kwargs
+                command,
+                timeout=7200,
+                raise_on_error=raise_on_error,
+                log_error=log_error,
+                custom_cli_credentials=self.custom_cli_credentials,
             )
             logging.info("create-cluster response: %s", result.stdout)
             response = json.loads(result.stdout)
diff --git a/tests/integration-tests/framework/credential_providers.py
b/tests/integration-tests/framework/credential_providers.py
index c81d28cc11..d1707ec60b 100644
--- a/tests/integration-tests/framework/credential_providers.py
+++ b/tests/integration-tests/framework/credential_providers.py
@@ -30,12 +30,11 @@ def run_pcluster_command(*args, **kwargs):
     if not region:
         region = os.environ["AWS_DEFAULT_REGION"]

-    if kwargs.get("custom_cli_credentials"):
-        with sts_credential_provider(region, credential_arn=kwargs.get("custom_cli_credentials")):
-            return run_command(*args, **kwargs)
-    else:
     if region in cli_credentials:
-        with sts_credential_provider(region, cli_credentials[region]):
+        with sts_credential_provider(
+            region, credential_arn=kwargs.get("custom_cli_credentials") or cli_credentials.get(region)
+        ):
+            kwargs.pop("custom_cli_credentials", None)
             return run_command(*args, **kwargs)
     else:
         return run_command(*args, **kwargs)
diff --git a/tests/integration-tests/tests/iam/test_iam.py b/tests/integration-tests/tests/iam/test_iam.py
index 3c77dfa5d3..6cd560a845 100644
--- a/tests/integration-tests/tests/iam/test_iam.py
+++ b/tests/integration-tests/tests/iam/test_iam.py
@@ -21,7 +21,7 @@
 from framework.tests_configuration.config_utils import get_all_regions
 from remote_command_executor import RemoteCommandExecutor
 from s3_common_utils import check_s3_read_resource, check_s3_read_write_resource, get_policy_resources
-from utils import dict_add_nested_key, generate_stack_name, wait_for_computefleet_changed
+from utils import generate_stack_name, wait_for_computefleet_changed

 from tests.common.assertions import assert_no_errors_in_logs
 from tests.schedulers.test_awsbatch import _test_job_submission as _test_job_submission_awsbatch
@@ -462,12 +462,3 @@
         stack_factory.delete_all_stacks()
     else:
         logging.warning("Skipping deletion of CFN stacks because --no-delete option is set")
-
-
-def _inject_resource_in_config(cluster_config, resource_value, resource_keys):
-    """Injects cluster config file with a given resource key-value."""
-    with open(cluster_config, encoding="utf-8") as conf_file:
-        config_content = yaml.load(conf_file, Loader=yaml.SafeLoader)
-    dict_add_nested_key(config_content, resource_value, resource_keys)
-    with open(cluster_config, "w", encoding="utf-8") as conf_file:
-        yaml.dump(config_content, conf_file)
diff --git a/tests/integration-tests/utils.py b/tests/integration-tests/utils.py
index 2a89c3fbcc..a7c06b1637 100644
--- a/tests/integration-tests/utils.py
+++ b/tests/integration-tests/utils.py
@@ -89,7 +89,6 @@ def run_command(
     timeout=None,
     raise_on_error=True,
     shell=False,
-    custom_cli_credentials=None,
 ):
     """Execute shell command."""
     if isinstance(command, str) and not shell:

From 40fa483dd75f675ae77c61356df97e15ea4803d7 Mon Sep 17 00:00:00 2001
From: Himani Deshpande
Date: Thu, 15 Dec 2022 17:50:16 -0500
Subject: [PATCH 7/8] Remove custom_cli_credentials from ClustersFactory

Remove custom_cli_credentials as a ClustersFactory class member.
Reference PR: https://github.com/aws/aws-parallelcluster/pull/4652
---
 tests/integration-tests/clusters_factory.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/tests/integration-tests/clusters_factory.py b/tests/integration-tests/clusters_factory.py
index 4e99422428..0b0c8832fc 100644
--- a/tests/integration-tests/clusters_factory.py
+++ b/tests/integration-tests/clusters_factory.py
@@ -405,10 +405,9 @@ def _delete_volumes(self):
 class ClustersFactory:
     """Manage creation and destruction of pcluster clusters."""

-    def __init__(self, delete_logs_on_success=False, custom_cli_credentials=None):
+    def __init__(self, delete_logs_on_success=False):
         self.__created_clusters = {}
         self._delete_logs_on_success = delete_logs_on_success
-        self.custom_cli_credentials = custom_cli_credentials

     def create_cluster(self, cluster, log_error=True, raise_on_error=True, **kwargs):
         """
@@ -424,7 +423,6 @@ def create_cluster(self, cluster, log_error=True, raise_on_error=True, **kwargs)
         # create the cluster
         logging.info("Creating cluster {0} with config {1}".format(name, cluster.config_file))
-        self.custom_cli_credentials = kwargs.get("custom_cli_credentials")
         command, wait = self._build_command(cluster, kwargs)
         try:
             result = run_pcluster_command(
@@ -432,7 +430,7 @@
                 timeout=7200,
                 raise_on_error=raise_on_error,
                 log_error=log_error,
-                custom_cli_credentials=self.custom_cli_credentials,
+                custom_cli_credentials=kwargs.get("custom_cli_credentials"),
             )
             logging.info("create-cluster response: %s", result.stdout)
             response = json.loads(result.stdout)

From 9457ef2565f9c5b3ade368f35e7533d8b3bb85d6 Mon Sep 17 00:00:00 2001
From: Himani Deshpande
Date: Fri, 16 Dec 2022 14:14:56 -0500
Subject: [PATCH 8/8] Revert change in run_pcluster_command

Revert the changes to indentation and the scope of the if clause in
run_pcluster_command().

Reference PR: https://github.com/aws/aws-parallelcluster/pull/4652
---
 .../framework/credential_providers.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/tests/integration-tests/framework/credential_providers.py b/tests/integration-tests/framework/credential_providers.py
index d1707ec60b..d450b08f55 100644
--- a/tests/integration-tests/framework/credential_providers.py
+++ b/tests/integration-tests/framework/credential_providers.py
@@ -30,14 +30,14 @@ def run_pcluster_command(*args, **kwargs):
     if not region:
         region = os.environ["AWS_DEFAULT_REGION"]

-    if region in cli_credentials:
-        with sts_credential_provider(
-            region, credential_arn=kwargs.get("custom_cli_credentials") or cli_credentials.get(region)
-        ):
-            kwargs.pop("custom_cli_credentials", None)
-            return run_command(*args, **kwargs)
-    else:
+    if region in cli_credentials:
+        with sts_credential_provider(
+            region, credential_arn=kwargs.get("custom_cli_credentials") or cli_credentials.get(region)
+        ):
+            kwargs.pop("custom_cli_credentials", None)
             return run_command(*args, **kwargs)
+    else:
+        return run_command(*args, **kwargs)

 @contextmanager
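With the series complete, the resulting credential selection can be exercised
as in the sketch below. This is a hypothetical usage example, not part of the
series: both role ARNs are placeholders, while the two helpers are the ones
actually defined in framework/credential_providers.py.

    import os
    from framework.credential_providers import (
        register_cli_credentials_for_region,
        run_pcluster_command,
    )

    # run_pcluster_command falls back to this when no region kwarg is given:
    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"

    # Region-wide default role, registered once by the test harness:
    register_cli_credentials_for_region(
        "us-east-1", "arn:aws:iam::123456789012:role/default-cli-role"
    )

    # Assumes the regional default role:
    run_pcluster_command(["pcluster", "list-clusters"])

    # Per-test override: custom_cli_credentials takes precedence over the
    # regional default; note it is only honored when some role is registered
    # for the region at all.
    run_pcluster_command(
        ["pcluster", "describe-cluster", "--cluster-name", "cluster-under-test"],
        custom_cli_credentials="arn:aws:iam::123456789012:role/parallelcluster/integ-tests-iam-rp-user-role",
    )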