feat(s3-deployment): enable efs support for handling large files in lambda #15220

Merged
merged 21 commits into master from s3-dep-efs-support on Sep 15, 2021

Changes from 10 commits

Commits (21)
de3bdfa  feat(s3-deployment): enable efs support for handling large files in l… (Jun 21, 2021)
ddfd37e  feat(s3-deployment): enable efs support for handling large files in l… (Jun 21, 2021)
a1ab5dd  Merge branch 'master' into s3-dep-efs-support (keshav0891, Jun 21, 2021)
2b6ec74  Merge branch 'aws:master' into s3-dep-efs-support (keshav0891, Jun 22, 2021)
0080df6  Fixed review comments (Jun 22, 2021)
bd5cef0  Fix build failure due to ECS integration tests (Jun 23, 2021)
f9b8390  Using VPC for creating singleton EFS and Lambda; (Jun 24, 2021)
d2a38d7  Merge branch 'master' into s3-dep-efs-support (keshav0891, Jun 24, 2021)
0da4a5a  Merge branch 'master' into s3-dep-efs-support (keshav0891, Jul 6, 2021)
99fdbf8  Merge branch 'master' into s3-dep-efs-support (keshav0891, Jul 26, 2021)
10f506e  Merge branch 'master' into s3-dep-efs-support (keshav0891, Aug 26, 2021)
40f7a3d  Merge branch 'master' into s3-dep-efs-support (keshav0891, Aug 27, 2021)
04b4674  Fixed comments and build failure (Aug 27, 2021)
d6d94a0  fixed build failure due to ecs integration tests (Aug 27, 2021)
1284899  Merge branch 'master' into s3-dep-efs-support (keshav0891, Aug 30, 2021)
89fcb54  Merge branch 'master' into s3-dep-efs-support (keshav0891, Sep 13, 2021)
d3a965f  Merge branch 'master' into s3-dep-efs-support (RomainMuller, Sep 13, 2021)
ac4d4f9  Fixed latest comments; (Sep 13, 2021)
91a9caf  Merge branch 'master' into s3-dep-efs-support (keshav0891, Sep 13, 2021)
f83f928  Fixed stack deletion failure with inline VPC (Sep 15, 2021)
1700908  Merge branch 'master' into s3-dep-efs-support (keshav0891, Sep 15, 2021)
@@ -108,15 +108,15 @@
"VpcPublicSubnet1NATGateway4D7517AA": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"SubnetId": {
"Ref": "VpcPublicSubnet1Subnet5C2D37C4"
},
"AllocationId": {
"Fn::GetAtt": [
"VpcPublicSubnet1EIPD7E02669",
"AllocationId"
]
},
"SubnetId": {
"Ref": "VpcPublicSubnet1Subnet5C2D37C4"
},
"Tags": [
{
"Key": "Name",
@@ -205,15 +205,15 @@
"VpcPublicSubnet2NATGateway9182C01D": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"SubnetId": {
"Ref": "VpcPublicSubnet2Subnet691E08A3"
},
"AllocationId": {
"Fn::GetAtt": [
"VpcPublicSubnet2EIP3C605A87",
"AllocationId"
]
},
"SubnetId": {
"Ref": "VpcPublicSubnet2Subnet691E08A3"
},
"Tags": [
{
"Key": "Name",
@@ -1219,7 +1219,7 @@
"Properties": {
"Code": {
"S3Bucket": {
"Ref": "AssetParametersc24b999656e4fe6c609c31bae56a1cf4717a405619c3aa6ba1bc686b8c2c86cfS3Bucket55EFA30C"
"Ref": "AssetParameters35fa56a5b1cbdf567620faef050ce082e86ff7310935ab443b13cf0a6f51d627S3BucketFECA0E34"
},
"S3Key": {
"Fn::Join": [
@@ -1232,7 +1232,7 @@
"Fn::Split": [
"||",
{
"Ref": "AssetParametersc24b999656e4fe6c609c31bae56a1cf4717a405619c3aa6ba1bc686b8c2c86cfS3VersionKey60329B70"
"Ref": "AssetParameters35fa56a5b1cbdf567620faef050ce082e86ff7310935ab443b13cf0a6f51d627S3VersionKey7F16088D"
}
]
}
@@ -1245,7 +1245,7 @@
"Fn::Split": [
"||",
{
"Ref": "AssetParametersc24b999656e4fe6c609c31bae56a1cf4717a405619c3aa6ba1bc686b8c2c86cfS3VersionKey60329B70"
"Ref": "AssetParameters35fa56a5b1cbdf567620faef050ce082e86ff7310935ab443b13cf0a6f51d627S3VersionKey7F16088D"
}
]
}
@@ -1348,17 +1348,17 @@
"Type": "String",
"Description": "Artifact hash for asset \"e9882ab123687399f934da0d45effe675ecc8ce13b40cb946f3e1d6141fe8d68\""
},
"AssetParametersc24b999656e4fe6c609c31bae56a1cf4717a405619c3aa6ba1bc686b8c2c86cfS3Bucket55EFA30C": {
"AssetParameters35fa56a5b1cbdf567620faef050ce082e86ff7310935ab443b13cf0a6f51d627S3BucketFECA0E34": {
"Type": "String",
"Description": "S3 bucket for asset \"c24b999656e4fe6c609c31bae56a1cf4717a405619c3aa6ba1bc686b8c2c86cf\""
"Description": "S3 bucket for asset \"35fa56a5b1cbdf567620faef050ce082e86ff7310935ab443b13cf0a6f51d627\""
},
"AssetParametersc24b999656e4fe6c609c31bae56a1cf4717a405619c3aa6ba1bc686b8c2c86cfS3VersionKey60329B70": {
"AssetParameters35fa56a5b1cbdf567620faef050ce082e86ff7310935ab443b13cf0a6f51d627S3VersionKey7F16088D": {
"Type": "String",
"Description": "S3 key for asset version \"c24b999656e4fe6c609c31bae56a1cf4717a405619c3aa6ba1bc686b8c2c86cf\""
"Description": "S3 key for asset version \"35fa56a5b1cbdf567620faef050ce082e86ff7310935ab443b13cf0a6f51d627\""
},
"AssetParametersc24b999656e4fe6c609c31bae56a1cf4717a405619c3aa6ba1bc686b8c2c86cfArtifactHash85F58E48": {
"AssetParameters35fa56a5b1cbdf567620faef050ce082e86ff7310935ab443b13cf0a6f51d627ArtifactHashB90DC367": {
"Type": "String",
"Description": "Artifact hash for asset \"c24b999656e4fe6c609c31bae56a1cf4717a405619c3aa6ba1bc686b8c2c86cf\""
"Description": "Artifact hash for asset \"35fa56a5b1cbdf567620faef050ce082e86ff7310935ab443b13cf0a6f51d627\""
},
"AssetParameters972240f9dd6e036a93d5f081af9a24315b2053828ac049b3b19b2fa12d7ae64aS3Bucket1F1A8472": {
"Type": "String",
@@ -1385,4 +1385,4 @@
"Description": "Artifact hash for asset \"872561bf078edd1685d50c9ff821cdd60d2b2ddfb0013c4087e79bf2bb50724d\""
}
}
}
}
20 changes: 20 additions & 0 deletions packages/@aws-cdk/aws-s3-deployment/README.md
@@ -212,6 +212,26 @@ size of the AWS Lambda resource handler.
> NOTE: a new AWS Lambda handler will be created in your stack for each memory
> limit configuration.

## EFS Support

If your workflow needs more disk space than the default 512 MB, you can attach an EFS file system to the
underlying Lambda function. To enable EFS support, set both the `useEfs` and `vpc` props on `BucketDeployment`.

Sample usage is shown below.
Note that creating the VPC inline may cause stack deletion failures; it is only done here for simplicity.
To avoid this, keep your network infrastructure (VPC) in a separate stack and pass it in as a prop, as in the second sketch after this example.

```ts
new s3deploy.BucketDeployment(this, 'DeployMeWithEfsStorage', {
  sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))],
  destinationBucket,
  destinationKeyPrefix: 'efs/',
  useEfs: true,
  vpc: new ec2.Vpc(this, 'Vpc'),
  retainOnDelete: false,
});
```
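
A minimal sketch of that recommended setup follows, assuming two hypothetical stacks, `NetworkStack` and `DeploymentStack`
(the stack, construct, and bucket names are illustrative and not part of this module; only the `BucketDeployment` props are):

```ts
import * as path from 'path';
import * as cdk from '@aws-cdk/core';
import * as ec2 from '@aws-cdk/aws-ec2';
import * as s3 from '@aws-cdk/aws-s3';
import * as s3deploy from '@aws-cdk/aws-s3-deployment';

// Stack that owns the network infrastructure (the VPC).
class NetworkStack extends cdk.Stack {
  public readonly vpc: ec2.IVpc;

  constructor(scope: cdk.Construct, id: string, props?: cdk.StackProps) {
    super(scope, id, props);
    this.vpc = new ec2.Vpc(this, 'Vpc');
  }
}

interface DeploymentStackProps extends cdk.StackProps {
  readonly vpc: ec2.IVpc;
}

// Stack that performs the deployment, receiving the VPC as a prop.
class DeploymentStack extends cdk.Stack {
  constructor(scope: cdk.Construct, id: string, props: DeploymentStackProps) {
    super(scope, id, props);

    const destinationBucket = new s3.Bucket(this, 'Destination');

    new s3deploy.BucketDeployment(this, 'DeployMeWithEfsStorage', {
      sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))],
      destinationBucket,
      destinationKeyPrefix: 'efs/',
      useEfs: true,
      vpc: props.vpc,
      retainOnDelete: false,
    });
  }
}

const app = new cdk.App();
const network = new NetworkStack(app, 'NetworkStack');
new DeploymentStack(app, 'DeploymentStack', { vpc: network.vpc });
```

Keeping the VPC in its own stack means deleting the deployment stack never has to tear down (or get blocked by) the network resources.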

## Notes

- This library uses an AWS CloudFormation custom resource which is about 10MiB in
67 changes: 65 additions & 2 deletions packages/@aws-cdk/aws-s3-deployment/lib/bucket-deployment.ts
@@ -1,6 +1,7 @@
import * as path from 'path';
import * as cloudfront from '@aws-cdk/aws-cloudfront';
import * as ec2 from '@aws-cdk/aws-ec2';
import * as efs from '@aws-cdk/aws-efs';
import * as iam from '@aws-cdk/aws-iam';
import * as lambda from '@aws-cdk/aws-lambda';
import * as s3 from '@aws-cdk/aws-s3';
@@ -83,6 +84,13 @@ export interface BucketDeploymentProps {
*/
readonly memoryLimit?: number;

/**
* Mount an EFS file system. Enable this if your assets are large and you encounter disk space errors.
*
* @default - No EFS. Lambda has access only to 512MB of disk space.
*/
readonly useEfs?: boolean

/**
* Execution role associated with this function
*
@@ -167,6 +175,7 @@ export interface BucketDeploymentProps {

/**
* The VPC network to place the deployment lambda handler in.
* This is required if `useEfs` is set.
*
* @default None
*/
@@ -193,18 +202,52 @@ export class BucketDeployment extends CoreConstruct {
throw new Error('Distribution must be specified if distribution paths are specified');
}

if (props.useEfs && !props.vpc) {
throw new Error('Vpc must be specified if useEfs is set');
}

const accessPointPath = '/lambda';
let accessPoint;
if (props.useEfs && props.vpc) {
const accessMode = '0777';
const fileSystem = this.getOrCreateEfsFileSystem(scope, {
vpc: props.vpc,
removalPolicy: cdk.RemovalPolicy.DESTROY,
});
accessPoint = fileSystem.addAccessPoint('AccessPoint', {
path: accessPointPath,
createAcl: {
ownerUid: '1001',
ownerGid: '1001',
permissions: accessMode,
},
posixUser: {
uid: '1001',
gid: '1001',
},
});
}

const mountPath = `/mnt${accessPointPath}`;
const handler = new lambda.SingletonFunction(this, 'CustomResourceHandler', {
uuid: this.renderSingletonUuid(props.memoryLimit),
uuid: this.renderSingletonUuid(props.memoryLimit, props.vpc),
code: lambda.Code.fromAsset(path.join(__dirname, 'lambda')),
layers: [new AwsCliLayer(this, 'AwsCliLayer')],
runtime: lambda.Runtime.PYTHON_3_6,
environment: props.useEfs ? {
MOUNT_PATH: mountPath,
} : undefined,
handler: 'index.handler',
lambdaPurpose: 'Custom::CDKBucketDeployment',
timeout: cdk.Duration.minutes(15),
role: props.role,
memorySize: props.memoryLimit,
vpc: props.vpc,
vpcSubnets: props.vpcSubnets,
filesystem: accessPoint ? lambda.FileSystem.fromEfsAccessPoint(
accessPoint,
mountPath,
): undefined,
});

const handlerRole = handler.role;
@@ -240,7 +283,7 @@

}

private renderSingletonUuid(memoryLimit?: number) {
private renderSingletonUuid(memoryLimit?: number, vpc?: ec2.IVpc) {
let uuid = '8693BB64-9689-44B6-9AAF-B0CC9EB8756C';

// if user specify a custom memory limit, define another singleton handler
@@ -254,8 +297,28 @@
uuid += `-${memoryLimit.toString()}MiB`;
}

// if user specify to use VPC, define another singleton handler
// with this configuration. otherwise, it won't be possible to use multiple
// configurations since we have a singleton.
// A VPC is a must if EFS storage is used and that's why we are only using VPC in uuid.
if (vpc) {
uuid += `-${vpc.node.addr.toUpperCase()}`;
}

return uuid;
}

/**
* Function to get/create a stack singleton instance of EFS FileSystem per vpc.
*
* @param scope Construct
* @param fileSystemProps EFS FileSystemProps
*/
private getOrCreateEfsFileSystem(scope: Construct, fileSystemProps: efs.FileSystemProps): efs.FileSystem {
const stack = cdk.Stack.of(scope);
const uuid = `Efs-${fileSystemProps.vpc.node.addr.toUpperCase()}`;
return stack.node.tryFindChild(uuid) as efs.FileSystem ?? new efs.FileSystem(scope, uuid, fileSystemProps);
}
}

/**
27 changes: 15 additions & 12 deletions packages/@aws-cdk/aws-s3-deployment/lib/lambda/index.py
@@ -1,26 +1,24 @@
import subprocess
import os
import tempfile
import json
import contextlib
import json
import traceback
import logging
import os
import shutil
import boto3
import contextlib
from datetime import datetime
from uuid import uuid4

import subprocess
import tempfile
from urllib.request import Request, urlopen
from uuid import uuid4
from zipfile import ZipFile

import boto3

logger = logging.getLogger()
logger.setLevel(logging.INFO)

cloudfront = boto3.client('cloudfront')

CFN_SUCCESS = "SUCCESS"
CFN_FAILED = "FAILED"
ENV_KEY_MOUNT_PATH = "MOUNT_PATH"

def handler(event, context):

@@ -115,8 +113,13 @@ def cfn_error(message=None):
#---------------------------------------------------------------------------------------------------
# populate all files from s3_source_zips to a destination bucket
def s3_deploy(s3_source_zips, s3_dest, user_metadata, system_metadata, prune):
# create a temporary working directory
workdir=tempfile.mkdtemp()
# create a temporary working directory in /tmp or if enabled an attached efs volume
if ENV_KEY_MOUNT_PATH in os.environ:
workdir = os.getenv(ENV_KEY_MOUNT_PATH) + "/" + str(uuid4())
os.mkdir(workdir)
else:
workdir = tempfile.mkdtemp()

logger.info("| workdir: %s" % workdir)

# create a directory into which we extract the contents of the zip file
2 changes: 2 additions & 0 deletions packages/@aws-cdk/aws-s3-deployment/package.json
@@ -90,6 +90,7 @@
"dependencies": {
"@aws-cdk/aws-cloudfront": "0.0.0",
"@aws-cdk/aws-ec2": "0.0.0",
"@aws-cdk/aws-efs": "0.0.0",
"@aws-cdk/aws-iam": "0.0.0",
"@aws-cdk/aws-lambda": "0.0.0",
"@aws-cdk/aws-s3": "0.0.0",
@@ -101,6 +102,7 @@
"homepage": "https://github.com/aws/aws-cdk",
"peerDependencies": {
"@aws-cdk/aws-cloudfront": "0.0.0",
"@aws-cdk/aws-efs": "0.0.0",
"@aws-cdk/aws-ec2": "0.0.0",
"@aws-cdk/aws-iam": "0.0.0",
"@aws-cdk/aws-lambda": "0.0.0",