-
Notifications
You must be signed in to change notification settings - Fork 1
/
deploy.py
297 lines (258 loc) · 12.5 KB
/
deploy.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
#!/usr/bin/env python
# Create or update a stack with new function code, ensuring that DynamoDB tables and
# S3 buckets are properly populated with the necessary items and files.
import argparse
import io
import json
import sys
import tempfile
import time
import zipfile

import boto3
# One shared boto3 client per AWS service, created once at import time and
# reused throughout the script (custom_epilogue() adds further services
# to this map lazily).
aws_clients = {
    service: boto3.client(service)
    for service in ('logs', 's3', 'cloudformation', 'lambda')
}
class JSONArg(argparse.Action):
    """argparse action that parses its argument string as JSON.

    On a parse failure the destination attribute is set to None rather
    than aborting the program.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        try:
            parsed_val = json.loads(values)
        except ValueError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. json.loads signals malformed
            # input with ValueError (JSONDecodeError subclasses it on
            # Python 3).
            parsed_val = None
        setattr(namespace, self.dest, parsed_val)
def s3_domain(s3):
    """Return the S3 endpoint host label for a region or boto3 client.

    Accepts either a region name string or a boto3 client (whose
    configured region is read). us-east-1 maps to the bare "s3"
    endpoint; any other region maps to "s3-<region>".
    """
    # Strings name the region directly; otherwise pull it off the client.
    region = s3 if isinstance(s3, str) or isinstance(s3, unicode) \
        else s3._client_config.region_name
    return "s3" if region == "us-east-1" else "s3-%s" % region
def stack_param(k, v):
    """Build one entry of a CloudFormation Parameters list.

    A value of None means "keep the stack's previous value"; any other
    value is passed through as the new ParameterValue.
    """
    # BUG FIX (idiom): compare against None with `is`, not `==`.
    if v is None:
        return {'ParameterKey': k, 'UsePreviousValue': True}
    return {'ParameterKey': k, 'ParameterValue': v}
# NOTE(review): dead stub — shadowed by the real deploy_lambda_code(config,
# args, cfn) defined later in this file, so this definition is never the one
# called at runtime; safe to delete.
def deploy_lambda_code(config):
    pass
def prologue(args):
try:
with open('deploy_config.json','r') as fp:
config = json.loads(fp.read())
except:
print "Unable to load configuration JSON from deploy_config.json"
exit(1)
if args.stack_name != None:
config['StackName'] = args.stack_name
else:
config['StackName'] = config['StackNamePrefix'] + str(int(time.time()))[-4:]
# Merge the parameters, overwriting the base ones with the the command-line
# provided ones.
stack_params = {}
if 'BaseStackParameters' in config and len(config['BaseStackParameters']) > 0:
stack_params.update(config['BaseStackParameters'])
del config['BaseStackParameters']
if args.stack_parameters != None:
stack_params.update(args.stack_parameters)
if len(stack_params) > 0:
config['StackParams'] = [ stack_param(k, v) for k, v in stack_params.iteritems() ]
else:
config['StackParams'] = []
# Initialize an empty conditions member, this'll get filled as we move along,
# and checked by epilogue steps.
config['Conditions'] = set()
return (config, aws_clients['cloudformation'])
def deploy_lambda_code(config, args, cfn):
print "Deploying Lambda code..."
awsl = aws_clients['lambda']
for func_name, func_def in config['LambdaFunctions'].iteritems():
print " Depoying code to logical function '%s'" % func_name
res = cfn.describe_stack_resource(StackName=config['StackName'],
LogicalResourceId=func_name)
try:
phys_id = res['StackResourceDetail']['PhysicalResourceId']
except:
print func_name
tfile = tempfile.TemporaryFile(mode='w', dir='.')
zfile = zipfile.ZipFile(tfile, 'w', zipfile.ZIP_DEFLATED)
for code_file in func_def['CodeFiles']:
zfile.write(code_file[0], code_file[1])
zfile.close()
bytes = open(tfile.name, 'r').read()
resp = awsl.update_function_code(FunctionName=phys_id,
ZipFile=bytes,
Publish=func_def['Publish'])
tfile.close()
if 'Alias' in func_def:
print " Updating alias '%s'" % func_def['Alias']
awsl.update_alias(FunctionName=phys_id,
Name=func_def['Alias'],
FunctionVersion=resp['Version'])
if 'CloudWatchLogsRetention' in func_def:
allowed_durations = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
# Create the log group, otherwise we can't set the retention policy.
try:
aws_clients['logs'].create_log_group(logGroupName='/aws/lambda/%s' % phys_id)
except Exception as e:
# It might already exist, and that's OK.
if e.response['Error']['Code'] != 'ResourceAlreadyExistsException':
raise e
try:
n_days = int(func_def['CloudWatchLogsRetention'])
except:
print " Unable to parse retention policy as an integer: %s" % repr(func_def['CloudWatchLogsRetention'])
else:
if n_days == 0:
print " Setting retention policy to 'Never Expire'"
aws_clients['logs'].delete_retention_policy(logGroupName='/aws/lambda/%s' % phys_id)
elif n_days not in allowed_durations:
print " Specified retention duration not in allowed set: %s" % repr(allowed_durations)
else:
print " Setting retention policy to %d days" % n_days
aws_clients['logs'].put_retention_policy(logGroupName='/aws/lambda/%s' % phys_id,
retentionInDays=n_days)
def custom_epilogue(Epilogue, Config):
    # Execute one custom epilogue step. Creates any boto3 service clients
    # named in Epilogue['Service'] that don't exist yet, binds each to a
    # bare local name (e.g. `s3`) via exec so the step's code can reference
    # it, then execs each code string with '{{STACKNAME}}' substituted.
    # Returns True on success, False (after printing the exception) on any
    # failure.
    # SECURITY NOTE(review): exec of config-supplied strings runs arbitrary
    # code — only ever use trusted deploy_config.json files.
    try:
        for svc in Epilogue['Service']:
            if svc not in aws_clients:
                aws_clients[svc] = boto3.client(svc)
            # Bind the client into this function's locals for the exec'd
            # step code below (Python 2 exec-in-function behavior).
            exec("%s = aws_clients['%s']" % (svc, svc))
        for c in Epilogue['Code']:
            exec(c.replace('{{STACKNAME}}', Config['StackName']))
        return True
    except Exception as e:
        print repr(e)
        return False
def epilogue(Config, Args, Cfn):
# Deploy Lambda function code
if 'LambdaFunctions' in Config:
deploy_lambda_code(Config, Args, Cfn)
if 'CustomEpilogue' in Config and not (Args.subcommand == 'update-stack' and Args.no_custom_epilogue):
print "Performing custom epilogue steps."
for e in Config['CustomEpilogue']:
# Skip if the stage specifies a list of StackOps that it
# should be run with.
if 'Conditions' in e and not set(e['Conditions']).issubset(config['Conditions']):
print " SKIPPING step:", e['Name']
else:
print " Performing step:", e['Name']
if not custom_epilogue(e, Config):
print " ERROR!"
def template_kwargs(config):
    """Build the keyword arguments for cfn.create_stack / cfn.update_stack.

    Uses an inline TemplateBody for local templates. When
    config['TemplateContents'] has an 'S3Bucket' entry (a local file whose
    contents name the bucket), the template is uploaded to S3 instead and
    referenced by TemplateURL — needed when the body exceeds the inline
    size limit.
    """
    ret = {'StackName': config['StackName'],
           'Parameters': config['StackParams']}
    soc = config.get('StackOperationConfig', {})
    if 'Capabilities' in soc:
        ret['Capabilities'] = soc['Capabilities']
    if 'S3Bucket' in config['TemplateContents']:
        with open(config['TemplateContents']['S3Bucket'], 'r') as fp:
            bucket_name = fp.read().strip()
        # BUG FIX: read the template via `with` instead of an unclosed
        # open(...).read() handle.
        with open(config['TemplateContents']['FileName'], 'r') as fp:
            template_body = fp.read()
        # The body is too large, so upload it as an S3 object, and deploy by
        # URL. A timestamped key keeps repeated deploys from colliding.
        s3 = aws_clients['s3']
        s3_key = "%s.%d" % (config['StackName'], int(time.time()))
        s3.put_object(Bucket=bucket_name, Key=s3_key, Body=template_body)
        ret['TemplateURL'] = 'https://%s.amazonaws.com/%s/%s' % (
            s3_domain(s3), bucket_name, s3_key)
    else:
        with open(config['TemplateContents']['FileName'], 'r') as fp:
            ret['TemplateBody'] = fp.read()
    return ret
# Periodically list the stack events, looking for CREATE_COMPLETE or
# UPDATE_COMPLETE. Since they are in reverse chronological order, it is
# always safe to check just the first batch. Look for a LogicalResourceId
# matching the stack name exactly.
def wait_for_green_stack(stack_name, cfn, Timeout=300):
    """Poll stack events until the stack reaches a terminal state.

    Returns True on CREATE_COMPLETE / UPDATE_COMPLETE, False on rollback,
    missing events, or after Timeout seconds. Events arrive newest-first,
    so only the first event of each batch is inspected; the event whose
    LogicalResourceId equals the stack name reports overall stack status.
    """
    deadline = time.time() + Timeout
    while time.time() <= deadline:
        # Progress dot per poll.
        sys.stdout.write('.')
        sys.stdout.flush()
        batch = cfn.describe_stack_events(StackName=stack_name).get('StackEvents', [])
        if not batch:
            print
            return False
        newest = batch[0]
        if newest['LogicalResourceId'] == stack_name:
            status = newest['ResourceStatus']
            if status in ('CREATE_COMPLETE', 'UPDATE_COMPLETE'):
                print
                return True
            if status in ('ROLLBACK_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE'):
                print
                return False
        time.sleep(5)
    print
    return False
def no_stack_op(config, args, cfn):
    # Stack-operation callback that intentionally does nothing.
    # NOTE(review): not wired to any subcommand in the visible source.
    pass
def create_stack(config, args, cfn):
# Create the stack with the stack name, template body file, parameters
resp = cfn.create_stack(**template_kwargs(config))
print json.dumps(resp, indent=4)
config['Conditions'].add('StackChanged')
return True
def update_stack(config, args, cfn):
if not args.no_cloudformation:
try:
resp = cfn.update_stack(**template_kwargs(config))
config['Conditions'].add('StackChanged')
print json.dumps(resp, indent=4)
except Exception as e:
if e.response['Error']['Message'] == 'No updates are to be performed.':
print "Stack matches current CloudFormation template, no CloudFormation updates performed."
config['Conditions'].add('NoStackChanges')
else:
raise(e)
else:
print "Skipping cloudformation step"
if __name__ == "__main__":
    # Command-line front end: the create-stack and update-stack subcommands
    # share --stack-name/--stack-parameters; update-stack adds flags to skip
    # the CloudFormation call and/or the epilogue steps.
    parser = argparse.ArgumentParser(description="""
Create or update a stack with appropriate static content in the S3 bucket and priming DynamoDB table items
""")
    # NOTE(review): this option is parsed but never read — prologue()
    # hard-codes 'deploy_config.json' (underscore, not hyphen). Confirm intent.
    parser.add_argument("--deploy-config", default="deploy-config.json", required=False,
                        help="Filename of the JSON deployment configuration file")
    subparsers = parser.add_subparsers()
    create_parser = subparsers.add_parser("create-stack", help="Create a new stack with an optional given stack name and parameters.")
    create_parser.add_argument("--stack-name", default=None, required=False,
                               help="Stack name to update with new template, code, and static content.")
    create_parser.add_argument("--stack-parameters", default=None, action=JSONArg,
                               help="Parameters for stack update/creation, given as a JSON dictionary of keys and string values. If a value is null, then the previous value is used.")
    create_parser.set_defaults(callback=create_stack, subcommand="create-stack")
    update_parser = subparsers.add_parser("update-stack", help="Update an existing stack, if possible, with a given name.")
    update_parser.add_argument("--stack-name", default=None, required=True,
                               help="Stack name to update with new template, code, and static content.")
    update_parser.add_argument("--stack-parameters", default=None, action=JSONArg,
                               help="Parameters for stack update/creation, given as a JSON dictionary of keys and string values. If a value is null, then the previous value is used.")
    update_parser.add_argument("--no-cloudformation", default=False, required=False, action='store_true',
                               help="Only perform epilogue operations, skipping CloudFormation operations")
    update_parser.add_argument("--no-epilogue", default=False, required=False, action='store_true',
                               help="Do not perform epilogue operations, only perform CloudFormation operations (if possible).")
    # NOTE(review): this help string looks copy-pasted from
    # --no-cloudformation; the flag actually skips only the custom epilogue
    # steps (see epilogue()).
    update_parser.add_argument("--no-custom-epilogue", default=False, required=False, action='store_true',
                               help="Only perform epilogue operations, skipping CloudFormation operations")
    update_parser.set_defaults(callback=update_stack, subcommand="update-stack")
    args = parser.parse_args()
    config, cfn = prologue(args)
    print "Stack name: %s" % config['StackName']
    # NOTE(review): this lookup requires the stack (and its
    # StaticContentBucket resource) to already exist, yet it runs before the
    # create-stack callback — confirm create-stack works on a fresh name.
    bucket_detail = cfn.describe_stack_resource(StackName=config['StackName'],
                                                LogicalResourceId="StaticContentBucket")
    # s3_domain() only reads the client's configured region, so passing the
    # CloudFormation client here still yields the right S3 endpoint label.
    print "Final web service URL: " + \
          'https://%s.amazonaws.com/%s/index.html' % \
          (s3_domain(cfn),
           bucket_detail['StackResourceDetail']['PhysicalResourceId'])
    print "Performing stack operation (%s)..." % args.subcommand
    args.callback(config, args, cfn)
    print "Waiting for stack to be green..."
    if wait_for_green_stack(config['StackName'], cfn):
        if args.subcommand == 'update-stack' and args.no_epilogue:
            print "Skipping epilogue..."
        else:
            print "Performing epilogue..."
            epilogue(config, args, cfn)
    else:
        print "Stack never reached green state."