#!/home/opc/py36env/bin/python
# OCI - Scheduled Auto Scaling Script
# Written by: Richard Garsthagen - richard@oc-blog.com
# Co-Developers: Joel Nation (https://github.com/Joelith)
# Version 2.1 - May 2020
#
# More info see: www.oc-blog.com
#
import oci
import datetime
import threading
import time
import sys
import requests
# You can modify / translate the tag names used by this script - case sensitive!!!
# Tag namespace and tag keys the scheduler looks for on resources.
PredefinedTag = "Schedule"  # defined-tag namespace that holds the schedules
AnyDay = "AnyDay"  # tag key: schedule valid on any day (lowest precedence)
Weekend = "Weekend"  # tag key: schedule valid on Saturday / Sunday
WeekDay = "WeekDay"  # tag key: schedule valid on Monday - Friday
# Index 0 == Monday, matching datetime.weekday(); a day-named tag key takes
# the highest precedence when a schedule is selected.
Daysofweek = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
# ============== CONFIGURE THIS SECTION ======================
# OCI Configuration
UseInstancePrinciple = False  # True: authenticate with Instance Principal instead of the config file
#Location of config file
#configfile = "c:\\Users\\UserName\\.oci\\config"
configfile = "~/.oci/config"
ComputeShutdownMethod = "SOFTSTOP"  # action sent to compute instances when scaling down
LogLevel = "ALL" # Use ALL or ERRORS. When set to ERRORS only a notification will be published if error occurs
TopicID = "" # Enter Topic OCID if you want the script to publish a message about the scaling actions
RateLimitDelay = 2 # Time in seconds to wait before retry of operation
# ============================================================
# Module-level flag set when any operation fails; drives the "ERRORS" notification mode.
ErrorsFound = False
# Configure logging output
def MakeLog(msg):
    """Emit one log message (this version simply writes to stdout)."""
    print(msg)
# Which scaling directions to execute on this run: "Up", "Down" or "All" (default).
Action = "All"
if len(sys.argv) == 2:
    requested = sys.argv[1].upper()
    if requested == "UP":
        Action = "Up"
    if requested == "DOWN":
        Action = "Down"

# Build the request signer only when running with Instance Principal auth.
if UseInstancePrinciple:
    signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
    MakeLog("Using Instance principle")
else:
    signer = None

MakeLog("Starting Auto Scaling script, executing {} actions".format(Action))
class AutonomousThread(threading.Thread):
    """Background worker: start a stopped Autonomous DB, wait until it is
    AVAILABLE, then rescale it to the requested CPU core count.

    Runs asynchronously so the long start/poll cycle does not block the
    main resource loop.
    """

    def __init__(self, threadID, ID, NAME, CPU):
        threading.Thread.__init__(self)
        self.threadID = threadID  # sequential id, informational only
        self.ID = ID              # OCID of the autonomous database
        self.NAME = NAME          # display name, used in log/report messages
        self.CPU = CPU            # target cpu_core_count once running

    def run(self):
        # Bug fix: without this declaration the assignments below created a
        # method-local ErrorsFound and the module-level flag was never set.
        global ErrorsFound

        MakeLog("Starting Autonomous DB {} and after that scaling to {} cpus".format(self.NAME, self.CPU))

        # Start the database, retrying while rate-limited (HTTP 429).
        started = False
        while True:
            try:
                response = database.start_autonomous_database(autonomous_database_id=self.ID)
                success.append("Started Autonomous DB {}".format(self.NAME))
                started = True
                break
            except oci.exceptions.ServiceError as e:
                if e.status == 429:
                    MakeLog("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    ErrorsFound = True
                    errors.append("Error ({}) Starting Autonomous DB {}".format(e.status, self.NAME))
                    break
        if not started:
            # Bug fix: the original fell through and polled `response`, which
            # is unbound after a failed start (except-alias is deleted when
            # the handler exits) -> NameError. Bail out instead.
            return

        # Poll until the database reports AVAILABLE.
        while response.data.lifecycle_state != "AVAILABLE":
            response = database.get_autonomous_database(autonomous_database_id=self.ID)
            time.sleep(10)

        # Now apply the requested CPU core count.
        MakeLog("Autonomous DB {} started, re-scaling to {} cpus".format(self.NAME, self.CPU))
        dbupdate = oci.database.models.UpdateAutonomousDatabaseDetails()
        dbupdate.cpu_core_count = self.CPU
        while True:
            try:
                database.update_autonomous_database(autonomous_database_id=self.ID, update_autonomous_database_details=dbupdate)
                success.append("Autonomous DB {} started, re-scaling to {} cpus".format(self.NAME, self.CPU))
                break
            except oci.exceptions.ServiceError as e:
                if e.status == 429:
                    MakeLog("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    # Consistency fix: flag the failure like every other path.
                    ErrorsFound = True
                    errors.append("Error ({}) re-scaling to {} cpus for {}".format(e.status, self.CPU, self.NAME))
                    break
class PoolThread(threading.Thread):
    """Background worker: start a stopped Instance Pool, wait until it is
    RUNNING, then resize it to the requested number of instances.

    Runs asynchronously so the long start/poll cycle does not block the
    main resource loop.
    """

    def __init__(self, threadID, ID, NAME, INSTANCES):
        threading.Thread.__init__(self)
        self.threadID = threadID      # sequential id, informational only
        self.ID = ID                  # OCID of the instance pool
        self.NAME = NAME              # display name, used in log/report messages
        self.INSTANCES = INSTANCES    # target pool size once running

    def run(self):
        # Bug fix: without this declaration the assignments below created a
        # method-local ErrorsFound and the module-level flag was never set.
        global ErrorsFound

        MakeLog("Starting Instance Pool {} and after that scaling to {} instances".format(self.NAME, self.INSTANCES))

        # Start the pool, retrying while rate-limited (HTTP 429).
        started = False
        while True:
            try:
                response = pool.start_instance_pool(instance_pool_id=self.ID)
                success.append("Starting Instance Pool {}".format(self.NAME))
                started = True
                break
            except oci.exceptions.ServiceError as e:
                if e.status == 429:
                    MakeLog("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    # Consistency fix: flag the failure like every other path.
                    ErrorsFound = True
                    errors.append("Error ({}) starting instance pool {}".format(e.status, self.NAME))
                    break
        if not started:
            # Bug fix: the original fell through and polled `response`, which
            # is unbound after a failed start -> NameError. Bail out instead.
            return

        # Poll until the pool reports RUNNING.
        while response.data.lifecycle_state != "RUNNING":
            response = pool.get_instance_pool(instance_pool_id=self.ID)
            time.sleep(10)

        # Now apply the requested pool size.
        MakeLog("Instance pool {} started, re-scaling to {} instances".format(self.NAME, self.INSTANCES))
        pooldetails = oci.core.models.UpdateInstancePoolDetails()
        pooldetails.size = self.INSTANCES
        while True:
            try:
                pool.update_instance_pool(instance_pool_id=self.ID, update_instance_pool_details=pooldetails)
                success.append("Rescaling Instance Pool {} to {} instances".format(self.NAME, self.INSTANCES))
                break
            except oci.exceptions.ServiceError as e:
                if e.status == 429:
                    MakeLog("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    ErrorsFound = True
                    errors.append("Error ({}) rescaling instance pool {}".format(e.status, self.NAME))
                    break
# ---------------------------------------------------------------------------
# Build the OCI API clients, either with Instance Principal auth or a local
# config file, and resolve the tenancy's root compartment OCID.
# ---------------------------------------------------------------------------
if UseInstancePrinciple:
    userName = "Instance Principle"
    try:
        # Instance metadata service: only reachable from inside an OCI VM.
        url = "http://169.254.169.254/opc/v1/instance/"
        data = requests.get(url, timeout=10).json()
    except Exception:
        # Bug fix: was a bare `except:` and interactive `exit()`.
        MakeLog("This instance is not running on OCI or does not have Instance Principle permissions")
        sys.exit(1)
    region = data['region']
    compID = data['compartmentId']
    if compID[:14] == "ocid1.tenancy.":
        RootCompartmentID = compID
        SearchRootID = False
    else:
        SearchRootID = True
        SearchCompID = compID
    identity = oci.identity.IdentityClient(config={}, signer=signer)
    compute = oci.core.ComputeClient(config={}, signer=signer)
    database = oci.database.DatabaseClient(config={}, signer=signer)
    pool = oci.core.ComputeManagementClient(config={}, signer=signer)
    search = oci.resource_search.ResourceSearchClient(config={}, signer=signer)
    ns = oci.ons.NotificationDataPlaneClient(config={}, signer=signer)
    # Walk up the compartment tree until the tenancy root is reached.
    while SearchRootID:
        compartment = identity.get_compartment(compartment_id=SearchCompID).data
        if compartment.compartment_id[:14] == "ocid1.tenancy.":
            RootCompartmentID = compartment.compartment_id
            SearchRootID = False
        else:
            SearchCompID = compartment.compartment_id
else:
    config = oci.config.from_file(configfile)
    identity = oci.identity.IdentityClient(config)
    compute = oci.core.ComputeClient(config)
    database = oci.database.DatabaseClient(config)
    pool = oci.core.ComputeManagementClient(config)
    search = oci.resource_search.ResourceSearchClient(config)
    ns = oci.ons.NotificationDataPlaneClient(config)
    user = identity.get_user(config["user"]).data
    userName = user.description
    RootCompartmentID = config["tenancy"]
    region = config["region"]

# Check credentials and list the regions this tenancy is subscribed to.
Tenancy = identity.get_tenancy(tenancy_id=RootCompartmentID).data
MakeLog("Logged in as: {}/{} @ {}".format(userName, Tenancy.name, region))
regions = identity.list_region_subscriptions(RootCompartmentID).data
# Bug fix: the original loop variable shadowed `region` set above.
regionnames = "".join(r.region_name + " " for r in regions)
MakeLog("Enabled regions: {}".format(regionnames))

threads = []  # async AutonomousDB / InstancePool start+rescale workers
tcount = 0    # running worker-thread counter (thread ids)

# Current day and hour select which schedule entry applies.
DayOfWeek = datetime.datetime.today().weekday()  # Monday == 0
Day = Daysofweek[DayOfWeek]                      # day of week as a string
CurrentHour = datetime.datetime.now().hour
MakeLog("Day of week: {} - Current hour: {}".format(Day, CurrentHour))
# NOTE(review): the original shifts the hour down by one before indexing the
# 24-entry schedule; at midnight this yields index -1 (the 23:00 slot).
# Behavior preserved as-is — confirm the intended semantics before changing.
CurrentHour = CurrentHour - 1

# Find every resource that carries the schedule tag namespace.
query = "query all resources where (definedTags.namespace = '{}')".format(PredefinedTag)
sdetails = oci.resource_search.models.StructuredSearchDetails()
sdetails.query = query
result = search.search_resources(search_details=sdetails, limit=1000).data

# All the items with a schedule are now collected; validate and apply below.
total_resources = len(result.items)
success = []  # human-readable report of completed actions
errors = []   # human-readable report of failures
# ---------------------------------------------------------------------------
# Walk every tagged resource, resolve today's schedule, and apply the action
# encoded for the current hour (0 = off, 1 = on, N = scale to N, per type).
# ---------------------------------------------------------------------------

def RetryCall(call, ok_msg, err_label):
    """Invoke one OCI SDK call, retrying while rate-limited (HTTP 429).

    call      -- zero-argument callable performing the API request
    ok_msg    -- message appended to `success` when the call succeeds
    err_label -- description appended to `errors` (prefixed with the HTTP
                 status) when the call fails with a non-429 error

    Returns the SDK response on success, or None on failure. Replaces the
    14 copies of the same retry loop in the original.
    """
    global ErrorsFound
    while True:
        try:
            resp = call()
            success.append(ok_msg)
            return resp
        except oci.exceptions.ServiceError as e:
            if e.status == 429:
                MakeLog("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                time.sleep(RateLimitDelay)
            else:
                ErrorsFound = True
                errors.append("Error ({}) {}".format(e.status, err_label))
                MakeLog("Error ({}) {}".format(e.status, err_label))
                return None


for resource in result.items:
    MakeLog("Checking: {} - {}".format(resource.display_name, resource.resource_type))
    schedule = resource.defined_tags[PredefinedTag]

    # Pick the schedule with increasing precedence:
    # AnyDay < WeekDay/Weekend < specific day name (today).
    ActiveSchedule = ""
    if AnyDay in schedule:
        ActiveSchedule = schedule[AnyDay]
    if DayOfWeek < 5:  # Monday..Friday
        if WeekDay in schedule:
            ActiveSchedule = schedule[WeekDay]
    else:
        if Weekend in schedule:
            ActiveSchedule = schedule[Weekend]
    if Day in schedule:
        ActiveSchedule = schedule[Day]

    # The schedule must contain exactly 24 comma-separated values, one/hour.
    schedulehours = ActiveSchedule.split(",")
    if ActiveSchedule != "" and len(schedulehours) != 24:
        ErrorsFound = True
        # Bug fix: the original appended the raw "{} - {}" format string
        # without calling .format(...).
        errors.append("Error with schedule of {} - {}, not correct amount of hours".format(resource.display_name, ActiveSchedule))
        MakeLog("Error with schedule of {} - {}, not correct amount of hours".format(resource.display_name, ActiveSchedule))
        ActiveSchedule = ""

    # No valid schedule applies to this resource today.
    if ActiveSchedule == "":
        continue

    # Hoist the per-hour value (the original re-evaluated int() many times);
    # a non-numeric entry is reported instead of crashing the whole run.
    try:
        target = int(schedulehours[CurrentHour])
    except ValueError:
        ErrorsFound = True
        errors.append("Error with schedule for {}".format(resource.display_name))
        MakeLog("Error with schedule of {}".format(resource.display_name))
        continue

    name = resource.display_name

    # ----- Compute instance: 0 = stop, 1 = start (BM shapes are ignored) -----
    if resource.resource_type == "Instance" and target in (0, 1):
        resourceDetails = compute.get_instance(instance_id=resource.identifier).data
        if resourceDetails.shape[:2] == "VM":
            if resourceDetails.lifecycle_state == "RUNNING" and target == 0 and Action in ("All", "Down"):
                msg = "Initiate Compute VM shutdown for {}".format(name)
                MakeLog(msg)
                RetryCall(lambda: compute.instance_action(instance_id=resource.identifier, action=ComputeShutdownMethod),
                          msg, "Compute VM Shutdown for {}".format(name))
            if resourceDetails.lifecycle_state == "STOPPED" and target == 1 and Action in ("All", "Up"):
                msg = "Initiate Compute VM startup for {}".format(name)
                MakeLog(msg)
                RetryCall(lambda: compute.instance_action(instance_id=resource.identifier, action="START"),
                          msg, "Compute VM startup for {}".format(name))

    # ----- DB System: VM shapes stop/start the node; BM shapes scale cores -----
    if resource.resource_type == "DbSystem":
        resourceDetails = database.get_db_system(db_system_id=resource.identifier).data
        if resourceDetails.shape[:2] == "VM" and target in (0, 1):
            # list_db_nodes hoisted behind the target check so the extra API
            # call is only made when an on/off action can actually apply.
            dbnodedetails = database.list_db_nodes(compartment_id=resource.compartment_id, db_system_id=resource.identifier).data[0]
            if dbnodedetails.lifecycle_state == "AVAILABLE" and target == 0 and Action in ("All", "Down"):
                msg = "Initiate DB VM shutdown for {}".format(name)
                MakeLog(msg)
                RetryCall(lambda: database.db_node_action(db_node_id=dbnodedetails.id, action="STOP"),
                          msg, "DB VM shutdown for {}".format(name))
            if dbnodedetails.lifecycle_state == "STOPPED" and target == 1 and Action in ("All", "Up"):
                msg = "Initiate DB VM startup for {}".format(name)
                MakeLog(msg)
                RetryCall(lambda: database.db_node_action(db_node_id=dbnodedetails.id, action="START"),
                          msg, "DB VM startup for {}".format(name))
        if resourceDetails.shape[:2] == "BM" and 1 < target < 53:
            if resourceDetails.cpu_core_count > target and Action in ("All", "Down"):
                msg = "Initiate DB BM Scale Down to {} for {}".format(target, name)
                MakeLog(msg)
                dbupdate = oci.database.models.UpdateDbSystemDetails()
                dbupdate.cpu_core_count = target
                RetryCall(lambda: database.update_db_system(db_system_id=resource.identifier, update_db_system_details=dbupdate),
                          msg, "DB BM Scale Down to {} for {}".format(target, name))
            if resourceDetails.cpu_core_count < target and Action in ("All", "Up"):
                msg = "Initiate DB BM Scale UP to {} for {}".format(target, name)
                MakeLog(msg)
                dbupdate = oci.database.models.UpdateDbSystemDetails()
                dbupdate.cpu_core_count = target
                RetryCall(lambda: database.update_db_system(db_system_id=resource.identifier, update_db_system_details=dbupdate),
                          msg, "DB BM Scale UP to {} for {}".format(target, name))

    # ----- Autonomous DB: 0 = stop, >0 = desired CPU core count -----
    if resource.resource_type == "AutonomousDatabase" and 0 <= target < 129:
        resourceDetails = database.get_autonomous_database(autonomous_database_id=resource.identifier).data
        if resourceDetails.lifecycle_state == "AVAILABLE" and target > 0:
            # Already running: adjust the CPU count in place if it differs.
            if resourceDetails.cpu_core_count > target and Action in ("All", "Down"):
                msg = "Initiate Autonomous DB Scale Down to {} for {}".format(target, name)
                MakeLog(msg)
                dbupdate = oci.database.models.UpdateAutonomousDatabaseDetails()
                dbupdate.cpu_core_count = target
                RetryCall(lambda: database.update_autonomous_database(autonomous_database_id=resource.identifier, update_autonomous_database_details=dbupdate),
                          msg, "Autonomous DB Scale Down to {} for {}".format(target, name))
            if resourceDetails.cpu_core_count < target and Action in ("All", "Up"):
                msg = "Initiate Autonomous DB Scale Up to {} for {}".format(target, name)
                MakeLog(msg)
                dbupdate = oci.database.models.UpdateAutonomousDatabaseDetails()
                dbupdate.cpu_core_count = target
                RetryCall(lambda: database.update_autonomous_database(autonomous_database_id=resource.identifier, update_autonomous_database_details=dbupdate),
                          msg, "Autonomous DB Scale Up to {} for {}".format(target, name))
        if resourceDetails.lifecycle_state == "AVAILABLE" and target == 0 and Action in ("All", "Down"):
            MakeLog("Stoping Autonomous DB {}".format(name))
            RetryCall(lambda: database.stop_autonomous_database(autonomous_database_id=resource.identifier),
                      "Initiate Autonomous DB Shutdown for {}".format(name),
                      "Autonomous DB Shutdown for {}".format(name))
        if resourceDetails.lifecycle_state == "STOPPED" and target > 0 and Action in ("All", "Up"):
            if resourceDetails.cpu_core_count == target:
                # Stopped with the right CPU count: a plain start is enough.
                MakeLog("Starting Autonomous DB {}".format(name))
                RetryCall(lambda: database.start_autonomous_database(autonomous_database_id=resource.identifier),
                          "Initiate Autonomous DB Startup for {}".format(name),
                          "Autonomous DB Startup for {}".format(name))
            else:
                # Wrong CPU count: start + rescale in a worker thread so the
                # long AVAILABLE wait does not block this loop.
                tcount = tcount + 1
                thread = AutonomousThread(tcount, resource.identifier, name, target)
                thread.start()
                threads.append(thread)

    # ----- Instance Pool: 0 = stop, >0 = desired pool size -----
    if resource.resource_type == "InstancePool":
        resourceDetails = pool.get_instance_pool(instance_pool_id=resource.identifier).data
        if resourceDetails.lifecycle_state == "RUNNING" and target == 0:
            if Action in ("All", "Down"):
                # Bug fix: the original appended the success message twice.
                MakeLog("Stopping instance pool {}".format(name))
                RetryCall(lambda: pool.stop_instance_pool(instance_pool_id=resource.identifier),
                          "Stopping instance pool {}".format(name),
                          "Stopping instance pool for {}".format(name))
        elif resourceDetails.lifecycle_state == "RUNNING" and target > resourceDetails.size:
            if Action in ("All", "Up"):
                msg = "Scaling up instance pool {} to {} instances".format(name, target)
                MakeLog(msg)
                pooldetails = oci.core.models.UpdateInstancePoolDetails()
                pooldetails.size = target
                RetryCall(lambda: pool.update_instance_pool(instance_pool_id=resource.identifier, update_instance_pool_details=pooldetails),
                          msg, "Scaling up instance pool {} to {} instances".format(name, target))
        elif resourceDetails.lifecycle_state == "RUNNING" and target < resourceDetails.size:
            if Action in ("All", "Down"):
                msg = "Scaling down instance pool {} to {} instances".format(name, target)
                MakeLog(msg)
                pooldetails = oci.core.models.UpdateInstancePoolDetails()
                pooldetails.size = target
                RetryCall(lambda: pool.update_instance_pool(instance_pool_id=resource.identifier, update_instance_pool_details=pooldetails),
                          msg, "Scaling down instance pool {} to {} instances".format(name, target))
        elif resourceDetails.lifecycle_state == "STOPPED" and target > 0:
            if Action in ("All", "Up"):
                if resourceDetails.size == target:
                    # Bug fix: the original appended the success message twice.
                    MakeLog("Starting instance pool {} from stopped state".format(name))
                    RetryCall(lambda: pool.start_instance_pool(instance_pool_id=resource.identifier),
                              "Starting instance pool {} from stopped state".format(name),
                              "Starting instance pool {} from stopped state".format(name))
                else:
                    # Wrong size: start + resize in a worker thread.
                    tcount = tcount + 1
                    thread = PoolThread(tcount, resource.identifier, name, target)
                    thread.start()
                    threads.append(thread)
# Wait for all asynchronous start+rescale workers to finish before reporting.
for worker in threads:
    worker.join()

# Optionally publish a run report to the configured ONS topic.
if len(TopicID) > 0:
    if LogLevel == "ALL" or (LogLevel == "ERRORS" and ErrorsFound):
        MakeLog("Publishing notification")
        body_message = "Scaling ({}) just completed. Found {} errors across {} scaleable instances (from a total of {} instances). \nError Details: {}\n\nSuccess Details: {}".format(Action, len(errors), len(success), total_resources, errors, success)
        pending = True
        while pending:
            try:
                response = ns.publish_message(TopicID, {"title": "Scaling Script ran across tenancy: {}".format(Tenancy.name), "body": body_message})
                pending = False
            except oci.exceptions.ServiceError as response:
                if response.status == 429:
                    MakeLog("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    MakeLog("Error ({}) publishing notification".format(response.status))
                    pending = False

MakeLog ("All scaling tasks done")