restart backup if killed prematurely
usmannasir committed Sep 28, 2020
1 parent c254e8f commit a4feafe
Showing 1 changed file with 56 additions and 12 deletions.
68 changes: 56 additions & 12 deletions plogical/IncScheduler.py
@@ -361,39 +361,77 @@ def startNormalBackups(type):


for backupjob in NormalBackupJobs.objects.all():

jobConfig = json.loads(backupjob.config)
destinationConfig = json.loads(backupjob.owner.config)

currentTime = time.strftime("%m.%d.%Y_%H-%M-%S")

if destinationConfig['type'] == 'local':

finalPath = '%s/%s' % (destinationConfig['path'].rstrip('/'), currentTime)
command = 'mkdir -p %s' % (finalPath)
ProcessUtilities.executioner(command)

if jobConfig[IncScheduler.frequency] == type:

### Check if an old job was killed prematurely; if so, resume from where it stopped.
try:
oldJobContinue = 1
pid = jobConfig['pid']
stuckDomain = jobConfig['website']
finalPath = jobConfig['finalPath']
jobConfig['pid'] = os.getpid()

command = 'ps aux'
result = ProcessUtilities.outputExecutioner(command)

if result.find(pid) > -1 and result.find('IncScheduler.py') > -1:
quit(1)


except:
### Save some important info in backup config
oldJobContinue = 0
jobConfig['pid'] = str(os.getpid())
jobConfig['finalPath'] = finalPath

NormalBackupJobLogs.objects.filter(owner=backupjob).delete()
NormalBackupJobLogs(owner=backupjob, status=backupSchedule.INFO,
message='Starting %s backup on %s..' % (type, time.strftime("%m.%d.%Y_%H-%M-%S"))).save()

actualDomain = 0
try:
if jobConfig[IncScheduler.allSites] == 'all':
websites = Websites.objects.all()
websites = Websites.objects.all().order_by('domain')
actualDomain = 1
else:
websites = backupjob.normalbackupsites_set.all()
websites = backupjob.normalbackupsites_set.all().order_by('domain__domain')
except:
websites = backupjob.normalbackupsites_set.all()
websites = backupjob.normalbackupsites_set.all().order_by('domain__domain')

doit = 0

for site in websites:

if actualDomain:
domain = site.domain
else:
domain = site.domain.domain


## Save the domain currently being backed up in the DB, so the job can resume from here if it is killed prematurely

jobConfig['website'] = domain
backupjob.config = json.dumps(jobConfig)
backupjob.save()

if oldJobContinue and not doit:
if domain == stuckDomain:
doit = 1
pass
else:
continue

retValues = backupSchedule.createLocalBackup(domain, '/dev/null')

if retValues[0] == 0:
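The core of the change is in the hunk above: before starting a fresh run, the scheduler reads the PID it stored on the previous run and scans `ps aux` to see whether that run is still alive. If it is, the new invocation exits; if it is not, the saved `website` and `finalPath` values tell it where to resume. A minimal sketch of that liveness check, using a plain `subprocess` call in place of CyberPanel's `ProcessUtilities` wrapper (the PID value is a made-up example):

```python
# Sketch only: stand-in for the ps-based liveness check in the hunk above.
import subprocess

def previous_job_running(saved_pid: str) -> bool:
    """Return True if ps still lists the saved PID alongside IncScheduler.py."""
    ps_output = subprocess.run(['ps', 'aux'], capture_output=True, text=True).stdout
    # Same heuristic as the diff: both substrings must appear somewhere in the output.
    return saved_pid in ps_output and 'IncScheduler.py' in ps_output

saved_pid = '12345'                 # hypothetical value read from jobConfig['pid']
if previous_job_running(saved_pid):
    raise SystemExit(1)             # an earlier run is still working; bail out
```

Note that the substring match is a loose heuristic: any unrelated process whose `ps` line happens to contain the digits of the old PID would also satisfy it.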
@@ -407,12 +445,12 @@ def startNormalBackups(type):
sender = 'root@%s' % (socket.gethostname())
TO = [adminEmail]
message = """\
From: %s
To: %s
Subject: %s
From: %s
To: %s
Subject: %s
Automatic backup failed for %s on %s.
""" % (sender, ", ".join(TO), SUBJECT, domain, currentTime)
Automatic backup failed for %s on %s.
""" % (sender, ", ".join(TO), SUBJECT, domain, currentTime)

logging.SendEmail(sender, TO, message)
else:
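`logging.SendEmail` is CyberPanel's internal mailer. For readers outside the codebase, a rough standard-library equivalent of the failure notification might look like the following; the local MTA on `localhost` and the recipient address are assumptions, and the blank line after the headers separates them from the body:

```python
# Hypothetical stand-in for logging.SendEmail, using only the standard library.
import smtplib
import socket

def send_failure_mail(admin_email: str, domain: str, when: str) -> None:
    sender = 'root@%s' % socket.gethostname()
    recipients = [admin_email]
    message = """\
From: %s
To: %s
Subject: Automatic backup failed

Automatic backup failed for %s on %s.
""" % (sender, ", ".join(recipients), domain, when)
    with smtplib.SMTP('localhost') as server:   # assumes an MTA is listening locally
        server.sendmail(sender, recipients, message)
```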
@@ -424,6 +462,12 @@ def startNormalBackups(type):
NormalBackupJobLogs(owner=backupjob, status=backupSchedule.INFO,
message='Backup completed for %s on %s.' % (
domain, time.strftime("%m.%d.%Y_%H-%M-%S"))).save()

jobConfig = json.loads(backupjob.config)
if jobConfig['pid']:
del jobConfig['pid']
backupjob.config = json.dumps(jobConfig)
backupjob.save()
else:
import subprocess
import shlex
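Taken together with the first hunk, the resume marker is just three keys in the job's JSON config: `pid`, `finalPath`, and `website` are written as each site is processed, and `pid` is dropped once the run completes so the next invocation knows there is nothing to pick up. A standalone sketch of that bookkeeping, with a throwaway class standing in for the `NormalBackupJobs` Django model:

```python
# Illustrative only: the names mirror the diff, but FakeJob replaces the Django model.
import json
import os

class FakeJob:
    """Stand-in for NormalBackupJobs; only the config field and save() are mimicked."""
    def __init__(self, config='{}'):
        self.config = config
    def save(self):
        pass  # the real model persists the row to the database here

def mark_in_progress(backupjob, final_path, domain):
    job_config = json.loads(backupjob.config)
    job_config['pid'] = str(os.getpid())   # which process is running the backup
    job_config['finalPath'] = final_path   # where this run writes its archives
    job_config['website'] = domain         # site currently being backed up
    backupjob.config = json.dumps(job_config)
    backupjob.save()

def mark_finished(backupjob):
    job_config = json.loads(backupjob.config)
    job_config.pop('pid', None)            # no 'pid' key means nothing to resume
    backupjob.config = json.dumps(job_config)
    backupjob.save()

job = FakeJob()
mark_in_progress(job, '/home/backup/09.28.2020_01-00-00', 'example.com')  # hypothetical path
mark_finished(job)
```

Using `pop()` with a default avoids the `KeyError` that `del jobConfig['pid']` would raise if the key were already gone.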
@@ -440,12 +484,12 @@ def startNormalBackups(type):
actualDomain = 0
try:
if jobConfig[IncScheduler.allSites] == 'all':
websites = Websites.objects.all()
websites = Websites.objects.all().order_by('domain')
actualDomain = 1
else:
websites = backupjob.normalbackupsites_set.all()
websites = backupjob.normalbackupsites_set.all().order_by('domain__domain')
except:
websites = backupjob.normalbackupsites_set.all()
websites = backupjob.normalbackupsites_set.all().order_by('domain__domain')

for site in websites:

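The `order_by('domain')` additions are what make the resume marker meaningful: the saved `website` value only identifies a safe restart point if every run walks the sites in the same order. A small sketch of the skip-until-marker loop under that assumption (the domain names are made up):

```python
# Sketch of the "skip until the saved domain, then continue" pattern from the diff.
def resume_iteration(domains, stuck_domain=None):
    resuming = stuck_domain is not None
    for domain in sorted(domains):          # deterministic order, like order_by('domain')
        if resuming:
            if domain != stuck_domain:
                continue                    # already handled by the run that died
            resuming = False                # found the stuck site; redo it and carry on
        yield domain

# A run that died while backing up 'b.example' restarts there:
print(list(resume_iteration(['c.example', 'a.example', 'b.example'], 'b.example')))
# -> ['b.example', 'c.example']
```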