<?php
/**
* Master controller for performing all database backups.
* NO MODIFICATIONS TO THIS FILE ARE NECESSARY!!!
*/
//Load the misc. global configuration settings
if (!file_exists(dirname(__FILE__).'/config.ini.php')) {
die("No config.ini.php file found. Exiting.\n");
}
include dirname(__FILE__).'/config.ini.php';
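//For reference, config.ini.php is expected to define at least the constants this
//script uses: ARCHIVE_DIR, MYSQLDUMP_BIN, and ALERT_EMAIL. An illustrative (not
//authoritative) example, with placeholder values:
//  define('ARCHIVE_DIR', '/var/backups/mysql/');   //destination archive root (assumed path)
//  define('MYSQLDUMP_BIN', '/usr/bin/mysqldump');  //location of mysqldump (assumed path)
//  define('ALERT_EMAIL', 'admin@example.com');     //recipient for error reports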
//Make sure that the config file is set up correctly and required libraries are available
if (!is_dir(ARCHIVE_DIR)) {
die("Archive directory - " . ARCHIVE_DIR . " - does not exist or access was denied.\n");
}
if (!file_exists(MYSQLDUMP_BIN)) {
die('Executable mysqldump not found at '.MYSQLDUMP_BIN."\n");
}
if (!function_exists('mysqli_init')) {
die("PHP mysqli library is not installed.\n");
}
if (!class_exists('SQLite3')) {
die("SQLite3 library is not installed.\n");
}
error_reporting(E_ALL);
$GLOBALS['errors'] = array();
//We define start time as the moment the script starts so that all later time comparisons
//aren't impacted by how long the script takes
define('START_TIME', time());
//Don't let errors from MySQLi go to the error log, since this script will handle them
mysqli_report(MYSQLI_REPORT_OFF);
//Be fault-tolerant of the local database not existing, and set a flag so the initial tables get created if necessary
$dbExists = file_exists(dirname(__FILE__).'/history.db');
//Handler to let us pull STDERR and STDOUT from proc_open
$descriptorSpec = array(
0 => array("pipe", "r"), // stdin
1 => array("pipe", "w"), // stdout
2 => array("pipe", "w"), // stderr
);
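//Each dump command below is run through proc_open() with this spec so the child's
//STDERR can be read back: the shell pipes mysqldump's STDOUT straight into gzip,
//so a non-empty STDERR is what signals that a dump failed.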
//Access our self-contained database
try {
$db = new SQLite3(dirname(__FILE__).'/history.db');
} catch (Exception $e) {
if ($dbExists === false) {
die("Unable to create SQLite database, likely because history.db cannot be created in ".dirname(__FILE__)."\n");
} else {
die("Unable to load SQLite database: ".$e->getMessage()."\n");
}
}
//Include all our specialized functions
include dirname(__FILE__).'/includes/functions.inc.php';
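//functions.inc.php is expected to provide the helpers used below, notably
//logError() (which presumably appends to the $GLOBALS['errors'] reported at the end)
//and getRanges() (which returns the daily/weekly/monthly retention counts for a database).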
//If we created the database, seed it with the appropriate tables
if ($dbExists === false) {
$db->query('CREATE TABLE db_history (db text, last_update int, PRIMARY KEY (db));');
$db->query('CREATE TABLE tbl_history (db text, tbl text, last_update int, PRIMARY KEY (db, tbl));');
} else { //Verify that the two tables exist
$q = $db->query("SELECT name FROM sqlite_master WHERE type = 'table';");
$tables = array();
while ($res = $q->fetchArray()) {
$tables[] = $res['name'];
}
if (!in_array('db_history', $tables) || !in_array('tbl_history', $tables)) {
die("'history.db' is not a valid SQLite database, or does not contain the valid tables.\n");
}
}
$connection = mysqli_init();
//Make sure the parent directory for the dumps exists
$parentBackupDir = sys_get_temp_dir()."/sql-".date("Ymd", START_TIME)."/";
if (!is_dir($parentBackupDir)) mkdir($parentBackupDir,0700);
//Loop through every configuration file and perform the backups
$iniPath = dirname(__FILE__).'/config.d/';
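//Each config.d/*.ini file describes one MySQL server. Based on the keys this script
//reads, an illustrative example (all names and values are placeholders):
//  [core]
//  machine = db01                  ; directory name used under ARCHIVE_DIR and the temp dump dir
//  host    = db01.example.com
//  user    = backup
//  pass    = secret
//
//  [somedatabase]
//  frequency = weekly              ; never | daily (default) | weekly | monthly
//  fullDump  = 1                   ; dump the whole database in one mysqldump call
//  schema_only[] = big_log_table   ; tables dumped with --no-data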
foreach (scandir($iniPath) AS $iniFile) {
//Skip the directory files and non-ini files
if (strlen($iniFile) < 3 || substr($iniFile, -3) != 'ini') continue;
//Read the config into an array
$iniSettings = parse_ini_file($iniPath.$iniFile, true);
//Store connection information as shorter variables, to make the code easier
$machine = isset($iniSettings['core']['machine']) ? $iniSettings['core']['machine'] : false;
$host = isset($iniSettings['core']['host']) ? $iniSettings['core']['host'] : false;
$user = isset($iniSettings['core']['user']) ? $iniSettings['core']['user'] : false;
$pass = isset($iniSettings['core']['pass']) ? $iniSettings['core']['pass'] : false;
//Prevent further processing for this server if not all connection settings are present
if ($machine === false || $host === false || $user === false || $pass === false) {
logError('['.$iniFile.'] Not all necessary connection configs provided.');
continue;
}
//Make sure this machine has a directory under the temporary dump parent
$backupDir = $parentBackupDir.$machine.'/';
if (!is_dir($backupDir)) mkdir($backupDir, 0700);
//Make sure the machine has a directory in the archive
if (!is_dir(ARCHIVE_DIR.$machine)) {
mkdir(ARCHIVE_DIR.$machine);
}
$connection->real_connect($host, $user, $pass);
if ($connection->connect_error) {
logError('['.$iniFile.'] Unable to establish database connection to '.$host . "\n\t"
. 'Connect Error (' . $connection->connect_errno . ') ' . $connection->connect_error);
continue;
}
echo "Backing up server '$host'\n";
$databases = array();
$query = $connection->query("SHOW DATABASES");
while ($row = $query->fetch_object()) {
$databases[] = $row->Database;
}
foreach ($databases AS $database) {
$frequency = "daily";
//Figure out any specific settings for this database
if (isset($iniSettings[$database]['frequency'])) {
$frequency = $iniSettings[$database]['frequency'];
}
//If we're never supposed to back up this database, skip it.
if ($frequency == "never") {
echo "\t$database is set to never update, skipping...\n";
continue;
}
//Determine if we'll dump table-by-table or all at once
$dumpFullDatabase = false;
if ($database == 'mysql' || !empty($iniSettings[$database]['fullDump'])) {
$dumpFullDatabase = true;
}
//Check when this database was last backed up and see whether it is due for another run
$q = $db->query("SELECT last_update FROM db_history WHERE db='$database'");
$res = $q->fetchArray();
$lastUpdate = ($res !== false) ? $res['last_update'] : false;
if ($lastUpdate == false) {
$db->query("INSERT INTO db_history VALUES ('$database', 1)");
$lastUpdate = 1;
}
if ($frequency == "monthly" && $lastUpdate > (START_TIME-84000*30)) {
echo "\t$database is set to update monthly and was last updated ".date("Y-m-d", $lastUpdate).", skipping...\n";
continue;
} elseif ($frequency == "weekly" && $lastUpdate > (START_TIME-84000*7)) {
echo "\t$database is set to update weekly and was last updated ".date("Y-m-d", $lastUpdate).", skipping...\n";
continue;
} elseif ($frequency == "daily" && $lastUpdate > (START_TIME-84000)) {
echo "\t$database is set to update daily and was last updated ".date("Y-m-d h:i:s", $lastUpdate).", skipping...\n";
continue;
}
echo "\tDoing backup on $database\n";
$innerDir = $backupDir.$database.'/';
mkdir($innerDir, 0700);
$retryCount = 0;
//Make sure our connection is still active, otherwise reconnect
$query = $connection->query("SELECT NOW()");
if ($query === false) {
if ($connection->real_connect($host, $user, $pass) === false) {
logError("[$iniFile] Connection error on $database\n\t"
. $connection->error);
continue;
}
}
$databaseSuccess = true;
if ($dumpFullDatabase === true) {
echo "\t\tDumping database all at once...\n";
$command = MYSQLDUMP_BIN." -h $host -u $user --password=$pass -qe $flag ".$database." | gzip -f > ".$innerDir.$database.".sql.gz";
$process = proc_open($command, $descriptorSpec, $pipes);
if (is_resource($process)) {
//Never needed since we pipe all STDOUT to gzip, but run to close the stream
$stdout = stream_get_contents($pipes[1]);
fclose($pipes[1]);
$stderr = stream_get_contents($pipes[2]);
fclose($pipes[2]);
if (!empty($stderr)) {
$databaseSuccess = false;
logError('['.$iniFile."] Failed to run mysqldump\n\t"
. $command . "\n\t"
. $stderr);
}
proc_close($process);
} else {
$databaseSuccess = false;
}
} else {
echo "\t\tDumping database by table...\n";
if ($connection->select_db($database) === false) {
$connection->real_connect($host, $user, $pass, $database);
}
//Be fault tolerant of not being able to get all the tables
while ($retryCount < 5) {
if (($query = $connection->query("SHOW FULL TABLES FROM $database")) !== false) {
break;
}
sleep(10);
$connection->real_connect($host, $user, $pass, $database);
$retryCount++;
}
if ($retryCount == 5 && $query === false) {
logError('['.$iniFile.'] Query backup failed on '.$database."\n\t"
. "Error: ".$connection->error);
break;
}
$schemaTables = isset($iniSettings[$database]['schema_only']) ?
$iniSettings[$database]['schema_only'] : array();
$tables = array();
while ($row2 = $query->fetch_row()) {
$tables[] = array(
'name' => $row2[0],
'type' => $row2[1],
);
}
foreach ($tables AS $tableInfo) {
$table = $tableInfo['name'];
$type = $tableInfo['type'];
$flag = "";
$needDump = true;
$localSuccess = false;
if ($type == 'VIEW' || $type == 'MEMORY' || $type == 'CSV') {
$needDump = false;
$retryCount = 0;
while (true) {
$query = $connection->query("SHOW CREATE TABLE $table");
if ($query !== false) {
break;
} elseif ($retryCount == 5) {
logError('['.$iniFile.'] Query backup failed on '.$database." ".$table."\n\t"
. "Error: ".$connection->error);
continue 2;
}
sleep(10);
$retryCount++;
$connection->real_connect($host, $user, $pass, $database);
}
$row = $query->fetch_row();
if (isset($row[1])) {
$localSuccess = true;
file_put_contents($innerDir.$table.'.sql',$row[1]);
} else {
logError('['.$iniFile."] Schema dump of $database -> $table failed.");
}
echo "\t\t\tDumping 'SHOW CREATE TABLE' on table $table\n";
} elseif (in_array($table, $schemaTables)) {
$flag = "--no-data";
echo "\t\t\tDoing structure backup on table $table\n";
} else {
echo "\t\t\tDoing full backup on table $table\n";
}
if ($needDump === true) {
$command = MYSQLDUMP_BIN . " -h $host -u $user --password=$pass -qe $flag ".$database." ".$table." | gzip -f > ".$innerDir.$table.".sql.gz";
$process = proc_open($command, $descriptorSpec, $pipes);
if (is_resource($process)) {
//Never needed since we pipe all STDOUT to gzip, but run to close the stream
$stdout = stream_get_contents($pipes[1]);
fclose($pipes[1]);
$stderr = stream_get_contents($pipes[2]);
fclose($pipes[2]);
if (empty($stderr)) {
$localSuccess = true;
} else {
logError('['.$iniFile."] Failed to run mysqldump\n\t"
. $command . "\n\t"
. $stderr);
}
proc_close($process);
}
}
if ($localSuccess === true) {
$db->query("REPLACE INTO tbl_history VALUES ('$database', '$table', '".time()."')");
} else {
$databaseSuccess = false;
}
}
}
//Mark a successful update
if ($databaseSuccess === true) {
$db->query("UPDATE db_history SET last_update=".time()." WHERE db='$database'");
//If the files were backed up successfully, move them into the archive (ARCHIVE_DIR, e.g. an S3-backed store)
//Make sure the database has a directory in the machine's archive
if (!is_dir(ARCHIVE_DIR.$machine.'/'.$database)) {
mkdir(ARCHIVE_DIR.$machine.'/'.$database);
}
//Make sure we have the directory structure in place
list($dailyKept, $weeklyKept, $monthlyKept) = getRanges($iniSettings, $database);
if ($dailyKept < 1) {
logError("[$iniFile] Daily Number Kept appears to be less than one for $database");
$dailyKept = 1;
}
//We always keep dailies, so we don't need to check the number
$dailyPath = ARCHIVE_DIR.$machine.'/'.$database.'/daily/';
if (!is_dir($dailyPath)) {
mkdir($dailyPath);
}
$weeklyPath = ARCHIVE_DIR.$machine.'/'.$database.'/weekly/';
if ($weeklyKept > 0 && !is_dir($weeklyPath)) {
mkdir($weeklyPath);
}
$monthlyPath = ARCHIVE_DIR.$machine.'/'.$database.'/monthly/';
if ($monthlyKept > 0 && !is_dir($monthlyPath)) {
mkdir($monthlyPath);
}
//Move the most recent snapshot to archive store
echo "\t\tMoving $database backup to daily archive\n";
system('mv '.$innerDir.' '.$dailyPath.date('Y-m-d', START_TIME), $moveFail);
if ($moveFail !== 0) {
logError("[$iniFile] Failed to move $innerDir to "
. $dailyPath.date('Y-m-d', START_TIME));
} else {
//Figure out if we can keep all the backups, or if one should be moved/deleted
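//Retention works by promotion: the newest dump always lands in daily/, and once a
//tier exceeds its keep count its oldest backup is either deleted or promoted to the
//next tier (daily -> weekly -> monthly), based on the counts and timestamps gathered
//into this structure.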
$oldestBackups = array(
'daily' => array(
'origPath' => $dailyPath,
'keepCount' => $dailyKept,
'hasCount' => 0,
'oldestTime' => time(),
'youngestTime' => 0,
'path' => false,
'action' => 'keep',
'movePath' => false,
),
'weekly' => array(
'origPath' => $weeklyPath,
'keepCount' => $weeklyKept,
'hasCount' => 0,
'oldestTime' => time(),
'youngestTime' => 0,
'path' => false,
'action' => 'keep',
'movePath' => false,
),
'monthly' => array(
'origPath' => $monthlyPath,
'keepCount' => $monthlyKept,
'hasCount' => 0,
'oldestTime' => time(),
'youngestTime' => 0,
'path' => false,
'action' => 'keep',
'movePath' => false,
),
);
//Count the backups in each tier and record the oldest and newest timestamps
foreach ($oldestBackups AS $type => $info) {
foreach (scandir($info['origPath']) AS $backupPath) {
if (strlen($backupPath) < 3) continue;
$oldestBackups[$type]['hasCount']++;
$fileTime = filemtime($info['origPath'].$backupPath);
if ($fileTime < $oldestBackups[$type]['oldestTime']) {
$oldestBackups[$type]['oldestTime'] = $fileTime;
$oldestBackups[$type]['path'] = $info['origPath'].$backupPath;
}
if ($fileTime > $oldestBackups[$type]['youngestTime']) {
$oldestBackups[$type]['youngestTime'] = $fileTime;
}
}
}
//If the dailies exceed their keep count, decide whether the oldest daily is deleted or promoted
$analyzeWeekly = false;
$analyzeMonthly = false;
if ($oldestBackups['daily']['hasCount'] > $oldestBackups['daily']['keepCount']) {
if ($oldestBackups['weekly']['youngestTime'] > (START_TIME-7*86400)) {
$youngDaysOld = floor((time()-$oldestBackups['weekly']['youngestTime'])/86400);
echo "\t\tDeleting oldest daily backup since youngest weekly is $youngDaysOld days old\n";
$oldestBackups['daily']['action'] = 'delete';
//If there is still room for another weekly, promote the oldest daily to a weekly
} elseif ($oldestBackups['weekly']['hasCount'] < $oldestBackups['weekly']['keepCount']) {
$oldestBackups['daily']['action'] = 'move';
echo "\t\tMoving oldest daily backup to weekly\n";
$oldestBackups['daily']['movePath'] = $oldestBackups['weekly']['origPath'];
} elseif ($oldestBackups['weekly']['oldestTime'] < (START_TIME-7*86400)) {
$oldestBackups['daily']['action'] = 'move';
$oldestBackups['daily']['movePath'] = $oldestBackups['weekly']['origPath'];
$analyzeWeekly = true;
echo "\t\tMoving oldest daily backup to weekly and analyzing weekly\n";
} else {
$oldestBackups['daily']['action'] = 'delete';
echo "\t\tDeleting oldest daily backup\n";
}
}
//Figure out if weeklies need to be rotated or deleted
if ($analyzeWeekly && $oldestBackups['weekly']['hasCount'] == $oldestBackups['weekly']['keepCount']) {
if ($oldestBackups['monthly']['youngestTime'] > (START_TIME-4*7*86400)) {
$youngDaysOld = floor((time()-$oldestBackups['monthly']['youngestTime'])/86400);
echo "\t\tDeleting oldest weekly backup since youngest monthly is $youngDaysOld days old\n";
$oldestBackups['weekly']['action'] = 'delete';
//If there is still room for another monthly, make the oldest weekly a monthly
} elseif ($oldestBackups['monthly']['hasCount'] <= $oldestBackups['monthly']['keepCount']) {
$oldestBackups['weekly']['action'] = 'move';
$oldestBackups['weekly']['movePath'] = $oldestBackups['monthly']['origPath'];
echo "\t\tMoving oldest weekly backup to monthly\n";
}
//If the oldest monthly is older than 4 weeks, move the oldest weekly there
elseif ($oldestBackups['monthly']['oldestTime'] < (START_TIME-4*7*86400)) {
echo "\t\tMoving oldest weekly backup to monthly\n";
$oldestBackups['weekly']['action'] = 'move';
$oldestBackups['weekly']['movePath'] = $oldestBackups['monthly']['origPath'];
//If we already have enough monthlies, delete the oldest monthly
if ($oldestBackups['monthly']['hasCount'] >= $oldestBackups['monthly']['keepCount']) {
$oldestBackups['monthly']['action'] = 'delete';
echo "\t\tDeleting oldest monthly backup\n";
}
} else {
echo "\t\tDeleting oldest weekly backup\n";
$oldestBackups['weekly']['action'] = 'delete';
}
}
//Process whatever directory moving was necessary
foreach ($oldestBackups AS $type => $info) {
$command = false;
if ($info['action'] == 'delete') {
$command = 'rm -rf '.$info['path'];
} elseif ($info['action'] == 'move') {
$command = 'mv '.$info['path'].' '.$info['movePath'];
}
if ($command !== false) {
echo "\t\tExecuting $command\n";
system($command, $commandFail);
if ($commandFail !== 0) {
logError("[$iniFile] Failed to execute command: $command");
}
}
}
}
}
}
//Close the connection to the MySQL server (so we leave as few open threads as possible)
$connection->close();
if (@rmdir($backupDir) === false) {
logError("[$iniFile] Unable to remove directory: $backupDir");
}
}
if (@rmdir($parentBackupDir) === false) {
logError("Unable to remove temp directory: ".$parentBackupDir);
}
//Send the alert email if there are alerts to report
if (count($GLOBALS['errors'])) {
mail(ALERT_EMAIL, 'Errors occurred running MySQL backup', print_r($GLOBALS['errors'], true));
}