This repository has been archived by the owner on May 17, 2023. It is now read-only.

feat: gracefully delete whole stack #25

Merged 1 commit on Dec 16, 2020
7 changes: 7 additions & 0 deletions README.md
@@ -62,6 +62,13 @@ It would automatically configure the freshly provisioned Nexus3 with the changes below
- Delete the default `file`-based blobstore
- Create a new blobstore named `s3-blobstore` backed by the dedicated S3 bucket created by this solution, with a never-expire policy for artifacts

### How to clean up
Run the command below to clean up the deployment, or delete the `SonatypeNexus3OnEKS` stack via the CloudFormation console.
```
npm run cleanup
```
**NOTE**: you still need to manually delete the EFS file system and S3 bucket created by this solution. They might contain your data, so be cautious before deleting them.
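For illustration, a minimal sketch of that manual cleanup with the AWS CLI; the file system ID, mount target ID, and bucket name are placeholders you must replace with the resources actually created in your account:
```
# EFS requires all mount targets to be deleted before the file system itself.
aws efs describe-mount-targets --file-system-id fs-12345678
aws efs delete-mount-target --mount-target-id fsmt-abcdef12
aws efs delete-file-system --file-system-id fs-12345678

# The S3 bucket must be emptied before it can be removed.
aws s3 rm s3://my-nexus3-blobstore-bucket --recursive
aws s3 rb s3://my-nexus3-blobstore-bucket
```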

## Security

See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information.
2 changes: 1 addition & 1 deletion bin/sonatype-nexus3.ts
@@ -12,4 +12,4 @@ new SonatypeNexus3Stack(app, 'SonatypeNexus3OnEKS', {
env: env,
});

cdk.Tags.of(app).add('app', 'nexus3');
cdk.Tags.of(app).add('app', 'nexus3');
146 changes: 146 additions & 0 deletions lambda.d/nexus3-purge/index.py
@@ -0,0 +1,146 @@
import json
import logging
import time
import urllib3
from uuid import uuid4
import os
import subprocess

logger = logging.getLogger()
logger.setLevel(logging.INFO)
http = urllib3.PoolManager()

CFN_SUCCESS = "SUCCESS"
CFN_FAILED = "FAILED"

# these are coming from the kubectl layer
os.environ['PATH'] = '/opt/kubectl:/opt/awscli:' + os.environ['PATH']

outdir = os.environ.get('TEST_OUTDIR', '/tmp')
kubeconfig = os.path.join(outdir, 'kubeconfig')

def handler(event, context):

    def cfn_error(message=None):
        logger.error("| cfn_error: %s" % message)
        cfn_send(event, context, CFN_FAILED, reason=message)

    try:
        logger.info(event)

        # cloudformation request type (create/update/delete)
        request_type = event['RequestType']

        # extract resource properties
        props = event['ResourceProperties']
        old_props = event.get('OldResourceProperties', {})

        if request_type == "Create":
            physical_id = f"nexus.on.aws.{str(uuid4())}"
        else:
            physical_id = event.get('PhysicalResourceId', None)
            if not physical_id:
                cfn_error("invalid request: request type is '%s' but 'PhysicalResourceId' is not defined" % request_type)
                return

        if request_type == "Delete":
            # resource properties (all required)
            cluster_name = props['ClusterName']
            role_arn = props['RoleArn']
            # "log in" to the cluster
            subprocess.check_call(['aws', 'eks', 'update-kubeconfig',
                '--role-arn', role_arn,
                '--name', cluster_name,
                '--kubeconfig', kubeconfig
            ])

            object_type = props['ObjectType']
            object_name = props['ObjectName']
            object_namespace = props['ObjectNamespace']
            json_path = props['JsonPath']
            timeout_seconds = props['TimeoutSeconds']
            release = props['Release']

            # poll until the object (the ingress of the helm release) is gone,
            # which indicates the release itself has been uninstalled
            output = wait_for_purge(['get', '-n', object_namespace, object_type, object_name, "-o=jsonpath='{{{0}}}'".format(json_path)], int(timeout_seconds))
            logger.info(f"The resource {object_type}/{object_name} has been purged.")

            # helm does not remove the PVCs created by the statefulset, so delete them explicitly
            kubectl(['delete', '-n', object_namespace, 'pvc', '-l', f'release={release}'])
            logger.info(f'The PVCs of helm release {release} are purged.')

        cfn_send(event, context, CFN_SUCCESS, physicalResourceId=physical_id)
    except KeyError as e:
        cfn_error(f"invalid request. Missing key {str(e)}")
    except Exception as e:
        logger.exception(e)
        cfn_error(str(e))

# sends a response to cloudformation
def cfn_send(event, context, responseStatus, responseData={}, physicalResourceId=None, noEcho=False, reason=None):

    responseUrl = event['ResponseURL']
    logger.info(responseUrl)

    responseBody = {}
    responseBody['Status'] = responseStatus
    responseBody['Reason'] = reason or ('See the details in CloudWatch Log Stream: ' + context.log_stream_name)
    responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name
    responseBody['StackId'] = event['StackId']
    responseBody['RequestId'] = event['RequestId']
    responseBody['LogicalResourceId'] = event['LogicalResourceId']
    responseBody['NoEcho'] = noEcho
    responseBody['Data'] = responseData

    body = json.dumps(responseBody)
    logger.info("| response body:\n" + body)

    headers = {
        'content-type': '',
        'content-length': str(len(body))
    }

    try:
        response = http.request('PUT',
                                responseUrl,
                                body=body,
                                headers=headers,
                                retries=False)
        logger.info("| status code: " + str(response.status))
    except Exception as e:
        logger.error("| unable to send response to CloudFormation")
        logger.exception(e)

def wait_for_purge(args, timeout_seconds):

    end_time = time.time() + timeout_seconds
    error = None

    while time.time() < end_time:
        try:
            # the output is surrounded with '', so we unquote; a successful call
            # means the object still exists, so keep polling
            output = kubectl(args).decode('utf-8')[1:-1]
        except Exception as e:
            error = str(e)
            # a 'NotFound' error means the object is finally gone
            if 'NotFound' in error:
                return 'Resource is purged'
        time.sleep(10)

    raise RuntimeError(f'Timeout waiting for output from kubectl command: {args} (last_error={error})')

def kubectl(args):
    retry = 3
    while retry > 0:
        try:
            cmd = ['kubectl', '--kubeconfig', kubeconfig] + args
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as exc:
            output = exc.output
            # transient API-server timeouts are retried a few times
            if b'i/o timeout' in output and retry > 0:
                logger.info("kubectl timed out, retries left: %s" % retry)
                retry = retry - 1
            else:
                raise Exception(output)
        else:
            logger.info(output)
            return output
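For orientation, a minimal sketch of the Delete event this handler receives from CloudFormation; every value is an illustrative placeholder, and the `ResourceProperties` keys mirror the ones wired up by the custom resource in `lib/sonatype-nexus3-stack.ts` below. CloudFormation serializes scalar property values as strings, which is why the handler calls `int(timeout_seconds)`:
```
# Illustrative only -- every value below is a placeholder, not real output.
sample_delete_event = {
    "RequestType": "Delete",
    "ResponseURL": "https://cloudformation-custom-resource-response-useast1.s3.amazonaws.com/presigned-url",
    "StackId": "arn:aws:cloudformation:us-east-1:111122223333:stack/SonatypeNexus3OnEKS/guid",
    "RequestId": "11111111-2222-3333-4444-555555555555",
    "LogicalResourceId": "Nexus3PurgeCR",
    "PhysicalResourceId": "nexus.on.aws.11111111-2222-3333-4444-555555555555",
    "ResourceProperties": {
        "ClusterName": "my-eks-cluster",
        "RoleArn": "arn:aws:iam::111122223333:role/my-kubectl-role",
        "ObjectType": "ingress",
        "ObjectName": "nexus3-sonatype-nexus",
        "ObjectNamespace": "default",
        "JsonPath": ".status.loadBalancer.ingress[0].hostname",
        "TimeoutSeconds": "360",  # CloudFormation passes scalars as strings
        "Release": "nexus3",
    },
}
```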
77 changes: 64 additions & 13 deletions lib/sonatype-nexus3-stack.ts
@@ -21,7 +21,7 @@ export class SonatypeNexus3Stack extends cdk.Stack {
aws: {
nexus: 'quay.io/travelaudience/docker-nexus',
nexusProxy: 'quay.io/travelaudience/docker-nexus-proxy',
// see https://github.com/aws/aws-cdk/blob/60c782fe173449ebf912f509de7db6df89985915/packages/%40aws-cdk/aws-eks/lib/kubectl-layer.ts
// see https://github.com/aws/aws-cdk/blob/60c782fe173449ebf912f509de7db6df89985915/packages/%40aws-cdk/aws-eks/lib/kubectl-layer.ts#L6
kubectlLayerAppid: 'arn:aws:serverlessrepo:us-east-1:903779448426:applications/lambda-layer-kubectl',
},
'aws-cn': {
@@ -67,16 +67,15 @@
});
}
let vpc!: ec2.IVpc;
let createNewVpc: boolean = this.node.tryGetContext('createNewVpc') ?? false
const createNewVpc: boolean = this.node.tryGetContext('createNewVpc') ?? false;
if (createNewVpc) {
vpc = new ec2.Vpc(this, 'NexusVpc', {
maxAzs: 2,
natGateways: 1,
})
});
}
else {
vpc = ec2.Vpc.fromLookup(this, 'vpc', {
isDefault: true
isDefault: true,
});
if (this.azOfSubnets(vpc.publicSubnets) <= 1 ||
this.azOfSubnets(vpc.privateSubnets) <= 1) {
@@ -85,7 +84,7 @@ }
}

const clusterAdmin = new iam.Role(this, 'AdminRole', {
assumedBy: new iam.AccountRootPrincipal()
assumedBy: new iam.AccountRootPrincipal(),
});

const kubectlLayer = new eks.KubectlLayer(this, 'KubeLayer', {
@@ -159,7 +158,7 @@ });
});
}

cluster.addNodegroupCapacity('nodegroup', {
const nodeGroup = cluster.addNodegroupCapacity('nodegroup', {
nodegroupName: 'nexus3',
instanceType: new ec2.InstanceType(this.node.tryGetContext('instanceType') ?? 'm5.large'),
minSize: 1,
@@ -213,12 +212,19 @@
enableWafv2: false,
},
});
awsLoadBalancerControllerChart.node.addDependency(nodeGroup);
awsLoadBalancerControllerChart.node.addDependency(albServiceAccount);
awsLoadBalancerControllerChart.node.addDependency(cluster.openIdConnectProvider);
awsLoadBalancerControllerChart.node.addDependency(cluster.awsAuth);

// deploy EFS, EFS CSI driver, PV
const efsCSI = cluster.addHelmChart('EFSCSIDriver', {
chart: 'https://github.com/kubernetes-sigs/aws-efs-csi-driver/releases/download/v0.3.0/helm-chart.tgz',
release: 'efs-csi-driver',
});

efsCSI.node.addDependency(nodeGroup);
efsCSI.node.addDependency(cluster.openIdConnectProvider);
efsCSI.node.addDependency(cluster.awsAuth);
const fileSystem = new efs.FileSystem(this, 'Nexus3FileSystem', {
vpc,
encrypted: false,
@@ -228,8 +234,10 @@ });
});
fileSystem.connections.allowDefaultPortFrom(ec2.Peer.ipv4(vpc.vpcCidrBlock),
'allow access efs from inside vpc');
fileSystem.connections.securityGroups.forEach(sg =>
(sg.node.defaultChild as ec2.CfnSecurityGroup).applyRemovalPolicy(cdk.RemovalPolicy.DESTROY));
const efsClass = 'efs-sc';
cluster.addManifest('efs-storageclass',
const efsStorageClass = cluster.addManifest('efs-storageclass',
{
kind: 'StorageClass',
apiVersion: 'storage.k8s.io/v1',
@@ -238,11 +246,13 @@ },
},
provisioner: 'efs.csi.aws.com'
});
efsStorageClass.node.addDependency(efsCSI);
const efsPVName = 'efs-pv';
const efsPV = cluster.addManifest('efs-pv', {
apiVersion: 'v1',
kind: 'PersistentVolume',
metadata: {
name: 'efs-pv'
name: efsPVName
},
spec: {
capacity: {
Expand All @@ -261,7 +271,7 @@ export class SonatypeNexus3Stack extends cdk.Stack {
}
});
efsPV.node.addDependency(fileSystem);
efsPV.node.addDependency(efsCSI);
efsPV.node.addDependency(efsStorageClass);

const nexus3Namespace = 'default';
const nexusServiceAccount = cluster.addServiceAccount('sonatype-nexus3', {
@@ -356,6 +366,45 @@ export class SonatypeNexus3Stack extends cdk.Stack {
const enableAutoConfigured: boolean = this.node.tryGetContext('enableAutoConfigured') || false;
const nexus3ChartName = 'nexus3';
const nexus3ChartVersion = '2.1.0';

const nexus3PurgeFunc = new lambda_python.PythonFunction(this, 'Nexus3Purge', {
description: 'Purges the resources (such as PVCs) left behind after deleting the Nexus3 helm chart',
entry: path.join(__dirname, '../lambda.d/nexus3-purge'),
index: 'index.py',
handler: 'handler',
runtime: lambda.Runtime.PYTHON_3_7,
environment: cluster.kubectlEnvironment,
logRetention: logs.RetentionDays.ONE_MONTH,
timeout: cdk.Duration.minutes(15),
layers: [ kubectlLayer ],
vpc: vpc,
securityGroups: cluster.kubectlSecurityGroup ? [cluster.kubectlSecurityGroup] : undefined,
vpcSubnets: cluster.kubectlPrivateSubnets ? { subnets: cluster.kubectlPrivateSubnets } : undefined,
});
nexus3PurgeFunc.role!.addToPrincipalPolicy(new iam.PolicyStatement({
actions: ['eks:DescribeCluster'],
resources: [cluster.clusterArn],
}));
// allow this handler to assume the kubectl role
cluster.kubectlRole!.grant(nexus3PurgeFunc.role!, 'sts:AssumeRole');

const nexus3PurgeCR = new cdk.CustomResource(this, 'Nexus3PurgeCR', {
serviceToken: nexus3PurgeFunc.functionArn,
resourceType: 'Custom::Nexus3-Purge',
properties: {
ClusterName: cluster.clusterName,
RoleArn: cluster.kubectlRole!.roleArn,
ObjectType: 'ingress',
ObjectName: `${nexus3ChartName}-sonatype-nexus`,
ObjectNamespace: nexus3Namespace,
JsonPath: '.status.loadBalancer.ingress[0].hostname',
TimeoutSeconds: cdk.Duration.minutes(6).toSeconds(),
Release: nexus3ChartName,
},
});
nexus3PurgeCR.node.addDependency(efsPV);
nexus3PurgeCR.node.addDependency(awsLoadBalancerControllerChart);
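// Deletion ordering (as wired up here and below): nexus3Chart depends on this
// custom resource, so on stack deletion CloudFormation removes the helm chart
// first; the purge handler then waits for the ingress to disappear and deletes
// the leftover PVCs, before the EFS PV and the ALB controller chart (both
// dependencies of this resource) are themselves torn down.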

let nexus3ChartProperties: { [key: string]: any } = {
statefulset: {
enabled: true,
@@ -439,8 +488,9 @@ export class SonatypeNexus3Stack extends cdk.Stack {
values: nexus3ChartProperties,
});
nexus3Chart.node.addDependency(nexusServiceAccount);
nexus3Chart.node.addDependency(awsLoadBalancerControllerChart);
nexus3Chart.node.addDependency(nexus3PurgeCR);
if (certificate) {
certificate.node.addDependency(nexus3PurgeCR);
nexus3Chart.node.addDependency(certificate);
nexus3Chart.node.addDependency(externalDNSResource!);

@@ -506,7 +556,8 @@ export class SonatypeNexus3Stack extends cdk.Stack {
objectName: `${nexus3ChartName}-sonatype-nexus`,
jsonPath: '.status.loadBalancer.ingress[0].hostname',
});
albAddress.node.addDependency(nexus3Chart);
// albAddress.node.addDependency(nexus3Chart);

if (enableAutoConfigured) {
let nexusEndpointHostname: string | undefined;
if (domainName)