diff --git a/Tardis/Cache.py b/Tardis/Cache.py index 198e984..b225355 100644 --- a/Tardis/Cache.py +++ b/Tardis/Cache.py @@ -53,10 +53,10 @@ def insert(self, key, value, now=None, timeout=None): self.cache[key] = (value, timeout) self.logger.debug("Inserting key %s", key) if self.size != 0 and len(self.cache) > self.size: - self.cache.flush() + self.flush() if len(self.cache) > self.size: self.cache.popitem(False) - + def retrieve(self, key): if not key in self.cache: self.logger.debug("Retrieving key %s failed", key) @@ -73,15 +73,15 @@ def retrieve(self, key): def delete(self, key): if key in self.cache: del self.cache[key] - + def flush(self): now = time.time() i = self.cache.iteritems() z = i.next() try: while z: - (key, item) = z - (value, timeout) = item + (_, item) = z + (_, timeout) = item if timeout > now: return self.cache.popitem(False) @@ -91,7 +91,7 @@ def flush(self): pass def purge(self): - self.cache = collection.OrderedDict() + self.cache = collections.OrderedDict() if __name__ == "__main__": c = Cache(5, 2) diff --git a/Tardis/CacheDir.py b/Tardis/CacheDir.py index 6ec6ed5..70f8a1a 100644 --- a/Tardis/CacheDir.py +++ b/Tardis/CacheDir.py @@ -32,10 +32,9 @@ import os.path import socket import logging -import pwd, grp import shutil -class CacheDir: +class CacheDir(object): def __init__(self, root, parts=2, partsize=2, create=True, user=None, group=None): self.root = os.path.abspath(root) self.parts = parts @@ -48,18 +47,18 @@ def __init__(self, root, parts=2, partsize=2, create=True, user=None, group=None if create: os.makedirs(self.root) if self.chown: - os.chown(self.root, self.user, self.group) + os.chown(self.root, self.user, self.group) else: raise Exception("CacheDir does not exist: " + root) def comps(self, name): return [name[(i * self.partsize):((i + 1) * self.partsize)] for i in range(0, self.parts)] - def dir(self, name): + def dirPath(self, name): return reduce(os.path.join, self.comps(name), self.root) def path(self, name): - return 
os.path.join(self.dir(name), name) + return os.path.join(self.dirPath(name), name) def exists(self, name): return os.path.lexists(self.path(name)) @@ -72,9 +71,9 @@ def size(self, name): return 0 def mkdir(self, name): - dir = self.dir(name) - if not os.path.isdir(dir): - os.makedirs(dir) + directory = self.dirPath(name) + if not os.path.isdir(directory): + os.makedirs(directory) if self.chown: path = self.root for i in self.comps(name): @@ -100,7 +99,7 @@ def insert(self, name, source): def link(self, source, dest): dstpath = self.path(dest) - srcpath = os.path.relpath(self.path(source), self.dir(dest)) + srcpath = os.path.relpath(self.path(source), self.dirPath(dest)) os.symlink(srcpath, dstpath) def remove(self, name): @@ -117,7 +116,7 @@ def removeSuffixes(self, name, suffixes): #logger.debug("Removed %s", name + suffix) deleted += 1 return deleted - + def move(self, oldname, newname): try: os.rename(self.path(oldname), self.path(newname)) @@ -132,7 +131,7 @@ def move(self, oldname, newname): path = os.path.join("cache", socket.gethostname()) c = CacheDir(path, 4, 2, True) print c.comps(test) - print c.dir(test) + print c.dirPath(test) print c.path(test) print c.exists(test) @@ -149,5 +148,3 @@ def move(self, oldname, newname): for line in fd: print line, print c.exists(test) - - diff --git a/Tardis/CompressedBuffer.py b/Tardis/CompressedBuffer.py index eeb7981..0de9b41 100644 --- a/Tardis/CompressedBuffer.py +++ b/Tardis/CompressedBuffer.py @@ -29,11 +29,9 @@ # POSSIBILITY OF SUCH DAMAGE. 
import zlib -import base64 import sys -import StringIO -import hashlib -import librsync + +import Tardis.librsync as librsync _defaultChunksize = 128 * 1024 @@ -66,7 +64,7 @@ def read(self, size=0x7fffffff): #print "read called: {} {} bytes available".format(size, avail) out = "" left = size - while(len(out) < size): + while len(out) < size: #print "read loop: so far: {}".format(len(out)) if (not self.buffer) or (len(self.buffer) == 0): #print "Calling _get" @@ -114,37 +112,36 @@ def _get(self): ret = None if self.stream: while not ret: - buffer = self.stream.read(self.chunksize) - self.uncompressed += len(buffer) + buf = self.stream.read(self.chunksize) + self.uncompressed += len(buf) if self.hasher: - self.hasher.update(buffer) + self.hasher.update(buf) if self.sig: - self.sig.step(buffer) + self.sig.step(buf) # First time around, create a compressor and check the compression ratio if self.first: self.first = False - buflen = len(buffer) self.compressor = zlib.compressobj() - ret = self.compressor.compress(buffer) - # Flush the buffer and colculate the size + ret = self.compressor.compress(buf) + # Flush the buf and calculate the size ret += self.compressor.flush(zlib.Z_SYNC_FLUSH) # Now, check what we've got back.
if ret: - ratio = float(len(ret)) / float(len(buffer)) - #print "Initial ratio: {} {} {}".format(ratio, len(ret), len(buffer)) + ratio = float(len(ret)) / float(len(buf)) + #print "Initial ratio: {} {} {}".format(ratio, len(ret), len(buf)) if ratio > self.threshold: - ret = buffer + ret = buf self.compressor = None elif self.compressor: - if not buffer: + if not buf: #print "_get: Done" ret = self.compressor.flush(zlib.Z_FINISH) self.stream = None else: - #print "_get: {} bytes read".format(len(buffer)) - ret = self.compressor.compress(buffer) + #print "_get: {} bytes read".format(len(buf)) + ret = self.compressor.compress(buf) else: - ret = buffer + ret = buf break # Make sure we don't got around the loop at the EOF self.compressed += len(ret) @@ -158,11 +155,11 @@ def compsize(self): return self.compressed def ratio(self): - return (float(self.compressed) / float(self.uncompressed)) + return float(self.compressed) / float(self.uncompressed) def size(self): return self.origsize() - + def isCompressed(self): return self.compressor != None @@ -178,23 +175,23 @@ def _get(self): ret = None while not ret: if self.stream: - buffer = self.stream.read(self.chunksize) - if not buffer: + buf = self.stream.read(self.chunksize) + if not buf: #print "_get: Done" ret = self.compressor.flush() self.uncompressed = self.uncompressed + len(ret) self.stream = None else: - #print "_get: {} bytes read".format(len(buffer)) - ret = self.compressor.decompress(buffer) - self.compressed = self.uncompressed + len(buffer) + #print "_get: {} bytes read".format(len(buf)) + ret = self.compressor.decompress(buf) + self.compressed = self.uncompressed + len(buf) self.uncompressed = self.compressed + len(ret) return ret return None if __name__ == "__main__": print "Opening {}".format(sys.argv[1]) - x = CompressedBufferedReader(file(sys.argv[1], "rb"), checksum=True) + x = CompressedBufferedReader(file(sys.argv[1], "rb")) #line = x.get() with file(sys.argv[2], "wb") as f: line = x.read(16384) @@ 
-205,16 +202,3 @@ def _get(self): line = x.read(16384) print x.origsize(), " ", x.compsize(), " ", x.ratio(), " :: ", x.checksum() - -""" - print "Opening {}".format(sys.argv[2]) - y = UncompressedBufferedReader(file(sys.argv[2], "rb")) - total = 0 - line = y.read(size=80) - while line: - #print "==== ", len(line), ":", total, " :: ", line #base64.b64encode(line) - #print line - total += len(line) - #line = x.get() - line = y.read(size=80) -""" diff --git a/Tardis/Config.py b/Tardis/Config.py index ab533d1..8072e29 100644 --- a/Tardis/Config.py +++ b/Tardis/Config.py @@ -28,17 +28,18 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. -import ConfigParser -import Defaults -import argparse -import Util import sys +import ConfigParser + +import Tardis.Defaults as Defaults +import Tardis.Util as Util + configDefaults = { 'Database': Defaults.getDefault('TARDIS_DB'), 'Client': Defaults.getDefault('TARDIS_CLIENT'), - 'DBDir': Defaults.getDefault('TARDIS_DBDIR'), - 'DBName': Defaults.getDefault('TARDIS_DBNAME'), + 'DBDir': Defaults.getDefault('TARDIS_DBDIR'), + 'DBName': Defaults.getDefault('TARDIS_DBNAME'), 'Password': None, 'PasswordFile': None, 'PasswordProg': None, @@ -86,5 +87,5 @@ def addPasswordOptions(parser): pwgroup.add_argument('--password-prog', dest='passwordprog', default=config.get(job, 'PasswordProg'), help='Use the specified command to generate the password on stdout') passgroup.add_argument('--crypt', dest='crypt',action=Util.StoreBoolean, default=config.getboolean(job, 'Crypt'), - help='Encrypt data. Only valid if password is set') + help='Encrypt data. 
Only valid if password is set') passgroup.add_argument('--keys', dest='keys', default=config.get(job, 'KeyFile'), help='Load keys from file.') diff --git a/Tardis/Connection.py b/Tardis/Connection.py index 2a2166a..0596710 100644 --- a/Tardis/Connection.py +++ b/Tardis/Connection.py @@ -31,11 +31,11 @@ import socket import json import uuid -import sys import time -import Messages import ssl + import Tardis +import Tardis.Messages as Messages protocolVersion = "1.1" headerString = "TARDIS " + protocolVersion @@ -80,7 +80,7 @@ def __init__(self, host, port, name, encoding, priority, client, autoname, token raise Exception("Unknown protocol: {}".format(message)) # Create a BACKUP message - message = { + resp = { 'message' : 'BACKUP', 'host' : client, 'encoding' : encoding, @@ -94,9 +94,9 @@ def __init__(self, host, port, name, encoding, priority, client, autoname, token 'full' : full } if token: - message['token'] = token + resp['token'] = token # BACKUP { json message } - self.put(json.dumps(message)) + self.put(json.dumps(resp)) message = self.sock.recv(1024).strip() fields = json.loads(message) @@ -114,7 +114,7 @@ def __init__(self, host, port, name, encoding, priority, client, autoname, token self.filenameKey = fields['filenameKey'] if 'contentKey' in fields: self.contentKey = fields['contentKey'] - except Exception as e: + except Exception: self.sock.close() raise @@ -123,7 +123,7 @@ def put(self, message): self.stats['messagesSent'] += 1 return - def recv(n): + def recv(self, n): msg = '' while len(msg) < n: chunk = self.sock.recv(n-len(msg)) @@ -206,18 +206,10 @@ def __init__(self, host, port, name, priority=0, client=None, autoname=False, to # Really, cons this up in the connection, but it needs access to the sock parameter, so..... 
self.sender = Messages.MsgPackMessages(self.sock, stats=self.stats, compress=compress) -class NullConnection(Connection): - def __init__(self, host, port, name): - pass - - def send(self, message): - print json.dumps(message) - - def receive(self): - return None - if __name__ == "__main__": - """ Test Code """ + """ + Test Code + """ conn = JsonConnection("localhost", 9999, "HiMom") print conn.getSessionId() conn.send({ 'x' : 1 }) diff --git a/Tardis/Defaults.py b/Tardis/Defaults.py index 3e289ea..e235db7 100644 --- a/Tardis/Defaults.py +++ b/Tardis/Defaults.py @@ -35,57 +35,52 @@ SECTION = 'Tardis' _defaults = { - 'TARDIS_DB' : '/srv/tardis', - 'TARDIS_DBDIR' : '', - 'TARDIS_DBNAME' : 'tardis.db', - 'TARDIS_CLIENT' : socket.gethostname(), - 'TARDIS_SERVER' : 'localhost', - 'TARDIS_EXCLUDES' : '.tardis-excludes', - 'TARDIS_LOCAL_EXCLUDES' : '.tardis-local-excludes', - 'TARDIS_GLOBAL_EXCLUDES': '/etc/tardis/excludes', - 'TARDIS_SKIP' : '.tardis-skip', - 'TARDIS_PORT' : '7420', - 'TARDIS_TIMEOUT' : '300', - 'TARDIS_DAEMON_CONFIG' : '/etc/tardis/tardisd.cfg', - 'TARDIS_LOCAL_CONFIG' : '/etc/tardis/tardisd.local.cfg', - 'TARDIS_PIDFILE' : '/var/run/tardisd.pid', - 'TARDIS_JOURNAL' : 'tardis.journal', - 'TARDIS_SCHEMA' : 'schema/tardis.sql', - 'TARDIS_REMOTE_PORT' : '7430', - 'TARDIS_REMOTE_CONFIG' : '/etc/tardis/tardisremote.cfg', - 'TARDIS_REMOTE_PIDFILE' : '/var/run/tardisremote.pid', - 'TARDIS_LS_COLORS' : "gone=yellow:changed=cyan:full=cyan,,bold:moved=blue:header=green:name=white:error=red,,bold:default=white", - 'TARDIS_NOCOMPRESS' : None, - 'TARDIS_RECENT_SET' : 'Current', - 'TARDIS_PW_STRENGTH' : '0.75', - 'TARDIS_DEFAULTS' : '/etc/tardis/system.defaults' - } + 'TARDIS_DB' : '/srv/tardis', + 'TARDIS_DBDIR' : '', + 'TARDIS_DBNAME' : 'tardis.db', + 'TARDIS_CLIENT' : socket.gethostname(), + 'TARDIS_SERVER' : 'localhost', + 'TARDIS_EXCLUDES' : '.tardis-excludes', + 'TARDIS_LOCAL_EXCLUDES' : '.tardis-local-excludes', + 'TARDIS_GLOBAL_EXCLUDES': 
'/etc/tardis/excludes', + 'TARDIS_SKIP' : '.tardis-skip', + 'TARDIS_PORT' : '7420', + 'TARDIS_TIMEOUT' : '300', + 'TARDIS_DAEMON_CONFIG' : '/etc/tardis/tardisd.cfg', + 'TARDIS_LOCAL_CONFIG' : '/etc/tardis/tardisd.local.cfg', + 'TARDIS_PIDFILE' : '/var/run/tardisd.pid', + 'TARDIS_JOURNAL' : 'tardis.journal', + 'TARDIS_SCHEMA' : 'schema/tardis.sql', + 'TARDIS_REMOTE_PORT' : '7430', + 'TARDIS_REMOTE_CONFIG' : '/etc/tardis/tardisremote.cfg', + 'TARDIS_REMOTE_PIDFILE' : '/var/run/tardisremote.pid', + 'TARDIS_LS_COLORS' : "gone=yellow:changed=cyan:full=cyan,,bold:moved=blue:header=green:name=white:error=red,,bold:default=white", + 'TARDIS_NOCOMPRESS' : None, + 'TARDIS_RECENT_SET' : 'Current', + 'TARDIS_PW_STRENGTH' : '0.75', + 'TARDIS_DEFAULTS' : '/etc/tardis/system.defaults' +} try: _default_file = os.environ['TARDIS_DEFAULTS'] -except: +except KeyError: _default_file = _defaults['TARDIS_DEFAULTS'] _parser = ConfigParser.ConfigParser(_defaults) _parser.add_section(SECTION) # Keep it happy later. _parser.read(_default_file) - -""" -Get a default value -""" def getDefault(var): if var in os.environ: return os.environ[var] else: try: return _parser.get(SECTION, var) - except ConfigParser.Error as e: + except ConfigParser.Error: return None if __name__ == "__main__": print _default_file - for i in _defaults.keys(): + for i in _defaults: print "%-24s: %s" % (i, getDefault(i)) - diff --git a/Tardis/Diff.py b/Tardis/Diff.py index 975e40e..56ac650 100644 --- a/Tardis/Diff.py +++ b/Tardis/Diff.py @@ -28,7 +28,8 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
-import os, os.path +import os +import os.path import sys import difflib import argparse @@ -39,14 +40,12 @@ import parsedatetime import Tardis -import Util -import TardisDB -import TardisCrypto -import Regenerator -import CacheDir -import Defaults -import Config +import Tardis.Util as Util +import Tardis.Regenerator as Regenerator +import Tardis.Defaults as Defaults +import Tardis.Config as Config +logger = None args = None current = Defaults.getDefault('TARDIS_RECENT_SET') @@ -54,7 +53,6 @@ def parseArgs(): isatty = os.isatty(sys.stdout.fileno()) global args - current = Defaults.getDefault('TARDIS_RECENT_SET') parser = argparse.ArgumentParser(description='Diff files between current and a Tardis backup, or multiple Tardis versions', fromfile_prefix_chars='@', formatter_class=Util.HelpFormatter, add_help=False) (args, remaining) = Config.parseConfigOptions(parser) @@ -74,7 +72,7 @@ def parseArgs(): parser.add_argument('--reduce-path', '-R', dest='reduce', default=0, const=sys.maxint, type=int, nargs='?', metavar='N', help='Reduce path by N directories. No value for "smart" reduction') - parser.add_argument('--recurse', '-r', dest='recurse', default=False, action=Util.StoreBoolean, help='Recurse into directories. Default: %(default)s'); + parser.add_argument('--recurse', '-r', dest='recurse', default=False, action=Util.StoreBoolean, help='Recurse into directories. Default: %(default)s') parser.add_argument('--list', '-l', dest='list', default=False, action=Util.StoreBoolean, help='Only list files that differ. Do not show diffs. 
Default: %(default)s') parser.add_argument('--verbose', '-v', action='count', dest='verbose', default=0, help='Increase the verbosity') @@ -93,10 +91,6 @@ def setupLogging(verbosity): loglevel = levels[verbosity] if verbosity < len(levels) else logging.DEBUG logging.basicConfig(level=loglevel) logger = logging.getLogger('') - pass - -def setupFiles(filename, cache, db, crypt): - pass def setcolor(line): if args.color: @@ -180,7 +174,7 @@ def getFileInfo(path, bset, tardis, crypt, reducePath): def diffDir(path, regenerator, bsets, tardis, crypt, reducePath, now, then, recurse=True): # Collect the first directory contents - (info1, p1) = getFileInfo(path, bsets[0]['backupset'], tardis, crypt, reducePath) + (info1, _) = getFileInfo(path, bsets[0]['backupset'], tardis, crypt, reducePath) entries1 = tardis.readDirectory((info1['inode'], info1['device'])) names1 = ([x['name'] for x in entries1]) if crypt: @@ -189,7 +183,7 @@ def diffDir(path, regenerator, bsets, tardis, crypt, reducePath, now, then, recu names1 = sorted(names1) if bsets[1]: - (info2, p2) = getFileInfo(path, bsets[1]['backupset'], tardis, crypt, reducePath) + (info2, _) = getFileInfo(path, bsets[1]['backupset'], tardis, crypt, reducePath) entries2 = tardis.readDirectory((info2['inode'], info2['device'])) names2 = [x['name'] for x in entries2] if crypt: @@ -212,7 +206,7 @@ def diffDir(path, regenerator, bsets, tardis, crypt, reducePath, now, then, recu def diffFile(fName, regenerator, bsets, tardis, crypt, reducePath, recurse, now, then): """ - Diff two files, either both from the database, or one from the database, and one from the + Diff two files, either both from the database, or one from the database, and one from the actual filesystem """ path = os.path.abspath(fName) @@ -306,7 +300,7 @@ def main(): for f in args.files: if bsets[1] is None and os.path.isdir(f): - diffDir(f, r, bsets, tardis, crypt, reducePath, now, then, recurse=args.recurse) + diffDir(f, r, bsets, tardis, crypt, args.reduce, now, 
then, recurse=args.recurse) else: diffFile(f, r, bsets, tardis, crypt, args.reduce, args.recurse, now, then) except KeyboardInterrupt: diff --git a/Tardis/HttpInterface.py b/Tardis/HttpInterface.py index 750c58d..3e88110 100644 --- a/Tardis/HttpInterface.py +++ b/Tardis/HttpInterface.py @@ -28,26 +28,28 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. -from flask import Flask, Response, session, request, url_for, escape, abort, redirect, send_file, make_response -from tornado.wsgi import WSGIContainer -from tornado.httpserver import HTTPServer -from tornado.ioloop import IOLoop -import os, os.path -import logging, logging.handlers + +import os +import os.path +import logging +import logging.handlers import json import argparse import ConfigParser import zlib -import base64 import daemonize -import Tardis -import TardisDB -import Util -import CacheDir -import Defaults +from flask import Flask, Response, session, request, url_for, abort, redirect, make_response +from tornado.wsgi import WSGIContainer +from tornado.httpserver import HTTPServer +from tornado.ioloop import IOLoop +import Tardis +import Tardis.TardisDB as TardisDB +import Tardis.Util as Util +import Tardis.CacheDir as CacheDir +import Tardis.Defaults as Defaults basedir = Defaults.getDefault('TARDIS_DB') dbname = Defaults.getDefault('TARDIS_DBNAME') @@ -85,8 +87,10 @@ args = None config = None +logger = None + def getDB(): - if not 'host' in session: + if 'host' not in session: abort(401) host = session['host'] db = dbs[host] @@ -235,7 +239,7 @@ def getFileInfoByPathForRange(first, last, pathname): def getFileInfoByName(backupset, device, inode, name): #app.logger.info("getFiloInfoByName Invoked: %d (%d,%d) %s", backupset, inode, device, name) db = getDB() - return createResponse(akeDict(db.getFileInfoByName(name, (inode, device), backupset))) + return createResponse(makeDict(db.getFileInfoByName(name, (inode, device), backupset))) # 
readDirectory @app.route('/readDirectory///') @@ -305,7 +309,7 @@ def _stream(f): try: f.seek(0) r = f.read(_blocksize) - while (r): + while r: yield r r = f.read(_blocksize) except Exception as e: @@ -351,7 +355,7 @@ def setKeys(): token = request.form['token'] fKey = request.form.get('FilenameKey') cKey = request.form.get('ContentKey') - if (db.setKeys(token, fKey, cKey) == False): + if not db.setKeys(token, fKey, cKey): raise Exception("Unable to set keys") return "OK" except Exception as e: @@ -363,9 +367,9 @@ def setToken(): try: db = getDB() token = request.form['token'] - if (db.setToken(token, fKey, cKey) == False): - raise Exception("Unable to set keys") - except Exception as e: + if not db.setToken(token): + raise Exception("Unable to set token") + except Exception: abort(403) @app.route('/listPurgeSets///') @@ -388,7 +392,7 @@ def listPurgeIncomplete(backupset, priority, timestamp): def purgeSets(backupset, priority, timestamp): db = getDB() return createResponse(db.purgeSets(priority, timestamp, backupset)) - + @app.route('/purgeIncomplete///') def purgeIncomplete(backupset, priority, timestamp): db = getDB() @@ -463,11 +467,11 @@ def processArgs(): def setupLogging(): levels = [logging.WARNING, logging.INFO, logging.DEBUG] - logger = logging.getLogger('') + log = logging.getLogger('') verbosity = args.verbose loglevel = levels[verbosity] if verbosity < len(levels) else logging.DEBUG - logger.setLevel(loglevel) + log.setLevel(loglevel) format = logging.Formatter("%(asctime)s %(levelname)s : %(message)s") @@ -479,8 +483,8 @@ def setupLogging(): handler = logging.StreamHandler() handler.setFormatter(format) - logger.addHandler(handler) - return logger + log.addHandler(handler) + return log def setup(): global args, config, logger, allowCompress, allowCache @@ -515,10 +519,10 @@ def tornado(): if args.daemon: user = args.user group = args.group - pidfile = args.pidfile + pidfile = args.pidfile fds = [h.stream.fileno() for h in logger.handlers if 
isinstance(h, logging.StreamHandler)] logger.info("About to daemonize") - + try: daemon = daemonize.Daemonize(app="tardisremote", pid=pidfile, action=run_server, user=user, group=group, keep_fds=fds) daemon.start() diff --git a/Tardis/List.py b/Tardis/List.py index a060633..9690a23 100644 --- a/Tardis/List.py +++ b/Tardis/List.py @@ -33,7 +33,6 @@ import os import os.path import stat -import termcolor import argparse import fnmatch import pwd @@ -41,13 +40,12 @@ import time import parsedatetime +import termcolor + import Tardis -import TardisDB -import RemoteDB -import TardisCrypto -import Util -import Defaults -import Config +import Tardis.Util as Util +import Tardis.Defaults as Defaults +import Tardis.Config as Config columns = None columnfmt = None @@ -72,7 +70,6 @@ fsEncoding = sys.getfilesystemencoding() def setColors(s): - global colors groups = s.split(':') groups = map(str.strip, groups) for g in groups: @@ -156,7 +153,7 @@ def collectFileInfo(filename, tardis, crypt): lookup = crypt.encryptPath(filename.encode()) if crypt else filename fInfos = {} - lInfo = None + lInfo = {} if filename == '/': fInfos = makeFakeRootInfo() elif args.reduce: @@ -213,13 +210,13 @@ def collectDirContents2(tardis, dirList, crypt): names = set() ranges = [] dirRange = [] - prev = None + prev = {} dirHash = dict([(x['backupset'], y) for (x,y) in dirList]) # Detect the ranges for bset in backupSets: d = dirHash.setdefault(bset['backupset']) # If we don't have an entry here, the range ends. 
- # OR if the inode is different from the previous + # OR if the inode is different from the previous if prev and ((not d) or (prev['inode'] != d['inode']) or (prev['device'] != d['device'])): if len(dirRange): ranges.append(dirRange) @@ -230,7 +227,7 @@ def collectDirContents2(tardis, dirList, crypt): if len(dirRange): ranges.append(dirRange) - # Now, for each range, populate + # Now, for each range, populate for r in ranges: first = r[0]['backupset'] last = r[-1]['backupset'] @@ -242,7 +239,7 @@ def collectDirContents2(tardis, dirList, crypt): name = name.decode(fsEncoding) names.add(name) for bset in r: - if (y['firstset'] <= bset['backupset'] <= y['lastset']): + if y['firstset'] <= bset['backupset'] <= y['lastset']: contents[bset['backupset']][name] = y # and return what we've discovered @@ -276,9 +273,7 @@ def getInfoByName(contents, name): return fInfo -""" -Get group and user names. Very unixy -""" +# Get group and user names. Very unixy _groups = {} _users = {} @@ -306,10 +301,8 @@ def getUserId(uid): else: return None -""" -Format time. If we're less that a year before now, print the time as Jan 12, 02:17, if earlier, -then Jan 12, 2014. Same as ls. -""" +# Format time. If we're less that a year before now, print the time as Jan 12, 02:17, if earlier, +# then Jan 12, 2014. Same as ls. _now = time.time() _yearago = _now - (365 * 24 * 3600) def formatTime(then): @@ -319,7 +312,6 @@ def formatTime(then): fmt = '%b %d, %Y' return time.strftime(fmt, time.localtime(then)) - column = 0 """ @@ -411,13 +403,13 @@ def printit(info, name, color, gone): eol = False doprint(columnfmt % name, color, eol=eol) -def printVersions(fInfos, filename): +def printVersions(fInfos): """ Print info about each version of the file that exists Doesn't actually do the printing, but calls printit to do it. 
""" global column - prevInfo = None # Previous version's info + prevInfo = {} # Previous version's info lSet = None column = 0 @@ -431,11 +423,12 @@ def printVersions(fInfos, filename): if (info is None) and (prevInfo is None): # file didn't exist here or previously. Just skip continue + if (info is None) and prevInfo is not None: # file disappeared. color = colors['gone'] gone = True - elif (info['checksum'] is None): + elif info['checksum'] is None: # Check for the error case where a file isn't connected to a checksum. Not good. color = colors['error'] new = True @@ -447,7 +440,7 @@ def printVersions(fInfos, filename): else: color = colors['changed'] new = True - elif (info['inode'] != prevInfo['inode']): + elif info['inode'] != prevInfo['inode']: color = colors['moved'] new = True else: @@ -462,7 +455,7 @@ def printVersions(fInfos, filename): # OR if we're printing deletions and we disappered if args.recent or not ((args.all or new) or (args.deletions and gone)): continue - + printit(info, bset['name'], color, gone) if args.recent: @@ -489,7 +482,7 @@ def processFile(filename, fInfos, tardis, crypt, printContents=True, recurse=0, flushLine() if args.versions: - printVersions(fInfos, filename) + printVersions(fInfos) # Figure out which versions of the file are directories @@ -500,12 +493,12 @@ def processFile(filename, fInfos, tardis, crypt, printContents=True, recurse=0, if not args.hidden: names = [n for n in names if not n.startswith('.')] (numCols, fmt) = computeColumnWidth(names) - column = 0 + col = 0 for name in sorted(names, key=lambda x: x.lower().lstrip('.'), reverse=args.reverse): fInfo = getInfoByName(contents, name) - column += 1 - eol = True if ((column % numCols) == 0) else False + col += 1 + eol = True if ((col % numCols) == 0) else False processFile(name, fInfo, tardis, crypt, printContents=False, recurse=0, first=False, fmt=fmt, eol=eol) flushLine() @@ -517,7 +510,7 @@ def processFile(filename, fInfos, tardis, crypt, printContents=True, 
recurse=0, if not args.hidden: names = [n for n in names if not n.startswith('.')] (numCols, fmt) = computeColumnWidth(names) - column = 0 + col = 0 for name in sorted(names, key=lambda x: x.lower().lstrip('.'), reverse=args.reverse): fInfos = getInfoByName(contents, name) @@ -534,43 +527,43 @@ def findSet(name): doprint("Could not find backupset %s" % name, color=colors['error'], eol=True) return -1 -""" -Prune backupsets to only those in the specified range. -""" def pruneBackupSets(startRange, endRange): + """ + Prune backupsets to only those in the specified range. + """ global backupSets newsets = backupSets[:] for i in backupSets: - if not (startRange <= i['backupset'] <= endRange): + if not startRange <= i['backupset'] <= endRange: newsets.remove(i) backupSets = newsets -""" -Parse and check the range varables, and prune the set appopriately. -""" def pruneBackupSetsByRange(): - range = args.range.split(':') - if len(range) > 2: + """ + Parse and check the range varables, and prune the set appopriately. + """ + setRange = args.range.split(':') + if len(setRange) > 2: doprint("Invalid range '%s'" % args.range, color=colors['error'], eol=True) sys.exit(1) - elif len(range) == 1: - range.append(range[0]) + elif len(setRange) == 1: + setRange.append(setRange[0]) - if range[0]: + if setRange[0]: try: - startRange = int(range[0]) + startRange = int(setRange[0]) except ValueError: - startRange = findSet(range[0]) + startRange = findSet(setRange[0]) if startRange == -1: sys.exit(1) else: startRange = 0 - if range[1]: + if setRange[1]: try: - endRange = int(range[1]) + endRange = int(setRange[1]) except ValueError: - endRange = findSet(range[1]) + endRange = findSet(setRange[1]) if endRange == -1: sys.exit(1) else: @@ -582,24 +575,23 @@ def pruneBackupSetsByRange(): pruneBackupSets(startRange, endRange) -""" -Parse and check the date range variable, and prune the range appropriately. 
-""" def pruneBackupSetsByDateRange(tardis): - global backupSets + """ + Parse and check the date range variable, and prune the range appropriately. + """ cal = parsedatetime.Calendar() - range = args.daterange.split(':') - if len(range) > 2: + daterange = args.daterange.split(':') + if len(daterange) > 2: doprint("Invalid range '%s'" % args.daterange, color=colors['error'], eol=True) sys.exit(1) - elif len(range) == 1: - range.append('') + elif len(daterange) == 1: + daterange.append('') - if range[0]: - (then, success) = cal.parse(range[0]) + if daterange[0]: + (then, success) = cal.parse(daterange[0]) if success: startTime = time.mktime(then) - startSet = tardis.getBackupSetInfoForTime(startTime) + if startSet: # Get the backupset, then add 1. Backupset will be the LAST backupset before # the start time, so 1 larger should be the first backupset after that. @@ -608,14 +600,14 @@ def pruneBackupSetsByDateRange(tardis): else: startRange = 0 else: - doprint("Invalid time: %s" % range[0], color=colors['error'], eol=True) + doprint("Invalid time: %s" % daterange[0], color=colors['error'], eol=True) sys.exit(1) else: startRange = 0 startTime = time.mktime(time.gmtime(0)) - if range[1]: - (then, success) = cal.parse(range[1]) + if daterange[1]: + (then, success) = cal.parse(daterange[1]) if success: endTime = time.mktime(then) endSet = tardis.getBackupSetInfoForTime(endTime) @@ -624,7 +616,7 @@ def pruneBackupSetsByDateRange(tardis): else: endRange = sys.maxint else: - doprint("Invalid time: %s" % range[1], color=colors['error'], eol=True) + doprint("Invalid time: %s" % daterange[1], color=colors['error'], eol=True) sys.exit(1) else: endRange = sys.maxint @@ -640,10 +632,10 @@ def pruneBackupSetsByDateRange(tardis): pruneBackupSets(startRange, endRange) -""" -Given a list of names, compute the columns widths -""" def computeColumnWidth(names): + """ + Given a list of names, compute the columns widths + """ if len(names) == 0: return (1, '%s') longestName = max(map(len, 
names)) @@ -652,7 +644,7 @@ def computeColumnWidth(names): columns = args.columns else: if os.isatty(sys.stdout.fileno()): - height, width = Util.getTerminalSize() + _, width = Util.getTerminalSize() width -= 2 # lop a couple characters off the end to avoid annoying wraps in some cases. columns = width / (longestName + 4) else: @@ -662,10 +654,10 @@ def computeColumnWidth(names): return (columns, fmt) -""" -Calculate display parameters, including creating the list of backupsets that we want to process -""" def setupDisplay(tardis): + """ + Calculate display parameters, including creating the list of backupsets that we want to process + """ global columns, columnfmt global backupSets @@ -681,7 +673,7 @@ def setupDisplay(tardis): def globPath(path, tardis, crypt, first=0): """ - Glob a path. Only globbs the first + Glob a path. Only globbs the first """ logger.debug("Globbing %s", path) if not Util.isMagic(path): @@ -701,7 +693,7 @@ def globPath(path, tardis, crypt, first=0): dirs = [(x, fInfos[x['backupset']]) for x in backupSets if fInfos[x['backupset']] and fInfos[x['backupset']]['dir'] == 1] # And cons up the names which are in those directories - (data, names) = collectDirContents2(tardis, dirs, crypt) + (_, names) = collectDirContents2(tardis, dirs, crypt) # Filter down any that match matches = fnmatch.filter(names, pattern) @@ -720,8 +712,8 @@ def processArgs(): isatty = os.isatty(sys.stdout.fileno()) parser = argparse.ArgumentParser(description='List Tardis File Versions', fromfile_prefix_chars='@', formatter_class=Util.HelpFormatter, add_help=False) - - (args, remaining) = Config.parseConfigOptions(parser) + + (_, remaining) = Config.parseConfigOptions(parser) Config.addCommonOptions(parser) Config.addPasswordOptions(parser) @@ -745,7 +737,7 @@ def processArgs(): parser.add_argument('--headers', dest='headers', default=True, action=Util.StoreBoolean, help='Show headers. 
Default: %(default)s') parser.add_argument('--colors', dest='colors', default=isatty, action=Util.StoreBoolean, help='Use colors. Default: %(default)s') parser.add_argument('--columns', dest='columns', type=int, default=None , help='Number of columns to display') - + parser.add_argument('--recurse', '-R', dest='recurse', default=False, action='store_true', help='List Directories Recurively') parser.add_argument('--maxdepth', dest='maxdepth', default=sys.maxint, type=int, help='Maximum depth to recurse directories') @@ -753,7 +745,7 @@ def processArgs(): parser.add_argument('--glob', dest='glob', default=False, action=Util.StoreBoolean, help='Glob filenames') parser.add_argument('--reduce', dest='reduce', default=0, type=int, const=sys.maxint, nargs='?', - help='Reduce paths by N directories. No value for smart reduction') + help='Reduce paths by N directories. No value for smart reduction') parser.add_argument('--realpath', dest='realpath', default=True, action=Util.StoreBoolean, help='Use the full path, expanding symlinks to their actual path components') rangegrp = parser.add_mutually_exclusive_group() @@ -770,10 +762,8 @@ def processArgs(): def main(): global args, logger try: - - FORMAT = "%(levelname)s : %(message)s" - logging.basicConfig(stream=sys.stderr, format=FORMAT, level=logging.DEBUG) + logging.basicConfig(stream=sys.stderr, format=FORMAT, level=logging.INFO) logger = logging.getLogger("") args = processArgs() @@ -784,7 +774,7 @@ def main(): password = Util.getPassword(args.password, args.passwordfile, args.passwordprog, prompt="Password for %s: " % (args.client)) args.password = None - (tardis, cache, crypt) = Util.setupDataConnection(args.database, args.client, password, args.keys, args.dbname, args.dbdir) + (tardis, _, crypt) = Util.setupDataConnection(args.database, args.client, password, args.keys, args.dbname, args.dbdir) setupDisplay(tardis) diff --git a/Tardis/RemoteDB.py b/Tardis/RemoteDB.py index 04d17e7..96838a4 100644 --- 
a/Tardis/RemoteDB.py +++ b/Tardis/RemoteDB.py @@ -28,21 +28,17 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. -import requests import logging import tempfile import sys -import urllib, urllib3 -import warnings +import urllib +import functools -import os, os.path +import requests +import requests_cache import Tardis -import ConnIdLogAdapter -import requests_cache - -import functools requests_cache.install_cache(backend='memory', expire_after=30.0) @@ -117,7 +113,7 @@ def __init__(self, url, host, prevSet=None, extra=None, token=None, compress=Tru self.prevBackupSet = b['backupset'] self.prevBackupDate = b['starttime'] self.lastClientTime = b['clienttime'] - self.logger.debug("Last Backup Set: {} {} ".format(self.prevBackupName, self.prevBackupSet)) + self.logger.debug("Last Backup Set: %s %d", self.prevBackupName, self.prevBackupSet) def connect(self): self.logger.debug("Creating new connection to %s for %s", self.baseURL, self.host) @@ -136,8 +132,8 @@ def _bset(self, current): """ Determine the backupset we're being asked about. 
True == current, false = previous, otherwise a number is returned """ - if type(current) is bool: - return str(self.currBackupSet) if current else str(self.prevBackupSet) + if isinstance(current, bool): + return str(self.currBackupSet) if current else str(self.prevBackupSet) else: return str(current) diff --git a/Tardis/Rotator.py b/Tardis/Rotator.py index 65efd5e..3f1ccca 100644 --- a/Tardis/Rotator.py +++ b/Tardis/Rotator.py @@ -30,13 +30,12 @@ import shutil import logging -import subprocess -import os, os.path -import stat +import os +import os.path import time import gzip -class Rotator: +class Rotator(object): def __init__(self, rotations=5, compress=32 * 1024): self.logger = logging.getLogger("Rotator") self.rotations = rotations diff --git a/Tardis/Sonic.py b/Tardis/Sonic.py index f382552..f5582fb 100644 --- a/Tardis/Sonic.py +++ b/Tardis/Sonic.py @@ -30,26 +30,25 @@ import logging import argparse -import ConfigParser -import os, os.path +import os +import os.path import sys import time import datetime import pprint import urlparse -import json import parsedatetime import passwordmeter import Tardis -import Util -import Defaults -import TardisDB -import TardisCrypto -import CacheDir -import RemoteDB -import Config +import Tardis.Util as Util +import Tardis.Defaults as Defaults +import Tardis.TardisDB as TardisDB +import Tardis.TardisCrypto as TardisCrypto +import Tardis.CacheDir as CacheDir +import Tardis.RemoteDB as RemoteDB +import Tardis.Config as Config current = Defaults.getDefault('TARDIS_RECENT_SET') pwStrMin = Defaults.getDefault('TARDIS_PW_STRENGTH') @@ -61,13 +60,14 @@ minPwStrength = 0 logger = None +args = None -def getDB(crypt, new=False, keyfile=None, allowRemote=True): +def getDB(crypt, new=False, allowRemote=True): token = crypt.createToken() if crypt else None loc = urlparse.urlparse(args.database) # This is basically the same code as in Util.setupDataConnection(). Should consider moving to it. 
if (loc.scheme == 'http') or (loc.scheme == 'https'): - if (not allowRemote): + if not allowRemote: raise Exception("This command cannot be executed remotely. You must execute it on the server directly.") # If no port specified, insert the port if loc.port is None: @@ -95,7 +95,7 @@ def getDB(crypt, new=False, keyfile=None, allowRemote=True): def createClient(crypt): try: - (db, cache) = getDB(None, True, allowRemote=False) + (db, _) = getDB(None, True, allowRemote=False) db.close() if crypt: setToken(crypt) @@ -107,7 +107,7 @@ def createClient(crypt): def setToken(crypt): try: # Must be no token specified yet - (db, cache) = getDB(None) + (db, _) = getDB(None) crypt.genKeys() (f, c) = crypt.getKeys() token = crypt.createToken() @@ -126,7 +126,7 @@ def setToken(crypt): def changePassword(crypt, crypt2): try: - (db, cache) = getDB(crypt) + (db, _) = getDB(crypt) # Load the keys, and insert them into the crypt object, to decyrpt them if args.keys: (f, c) = Util.loadKeys(args.keys, db.getConfigValue('ClientID')) @@ -161,7 +161,7 @@ def moveKeys(db, crypt): return 1 clientId = db.getConfigValue('ClientID') token = crypt.createToken() - (db, cache) = getDB(crypt) + (db, _) = getDB(crypt) if args.extract: (f, c) = db.getKeys() if not (f and c): @@ -183,7 +183,7 @@ def moveKeys(db, crypt): logger.exception(e) return 1 -def listBSets(db, crypt): +def listBSets(db): try: last = db.lastBackupSet() for i in db.listBackupSets(): @@ -203,7 +203,7 @@ def listBSets(db, crypt): logger.exception(e) return 1 -def _bsetInfo(db, crypt, info): +def _bsetInfo(db, info): print "Backupset : %s (%d)" % ((info['name']), info['backupset']) print "Completed : %s" % ('True' if info['completed'] else 'False') t = time.strftime("%d %b, %Y %I:%M:%S %p", time.localtime(float(info['starttime']))) @@ -229,19 +229,19 @@ def _bsetInfo(db, crypt, info): print "Purgeable Size : %s" % (Util.fmtSize(endInfo[1])) print "Purgeable Space : %s" % (Util.fmtSize(endInfo[2])) -def bsetInfo(db, crypt): +def 
bsetInfo(db): printed = False if args.backup or args.date: info = getBackupSet(db, args.backupset, args.date) if info: - _bsetInfo(db, crypt, info) + _bsetInfo(db, info) printed = True else: first = True for info in db.listBackupSets(): if not first: print "------------------------------------------------" - _bsetInfo(db, crypt, info) + _bsetInfo(db, info) first = False printed = True if printed: @@ -253,11 +253,11 @@ def confirm(): else: print "Proceed (y/n): ", yesno = sys.stdin.readline().strip().upper() - return (yesno == 'YES' or yesno == 'Y') + return yesno == 'YES' or yesno == 'Y' -def purge(db, cache, crypt): +def purge(db, cache): bset = getBackupSet(db, args.backup, args.date, True) - if bset == None: + if bset is None: logger.error("No backup set found") sys.exit(1) # List the sets we're going to delete @@ -290,7 +290,7 @@ def deleteBsets(db, cache): bsets = [] for i in args.backups: bset = getBackupSet(db, i, None) - if bset == None: + if bset is None: logger.error("No backup set found for %s", i) sys.exit(1) bsets.append(bset) @@ -339,7 +339,7 @@ def parseArgs(): global args, minPwStrength parser = argparse.ArgumentParser(description='Tardis Sonic Screwdriver Utility Program', fromfile_prefix_chars='@', formatter_class=Util.HelpFormatter, add_help=False) - + (args, remaining) = Config.parseConfigOptions(parser) c = Config.config t = args.job @@ -371,11 +371,8 @@ def parseArgs(): keyParser.add_argument('--delete', dest='deleteKeys', default=False, action=Util.StoreBoolean, help='Delete keys from server or database') common = argparse.ArgumentParser(add_help=False) - """ - common.add_argument('--database', '-D', dest='database', default=c.get(t, 'Database'), help="Database to use. Default: %(default)s") - common.add_argument('--client', '-C', dest='client', default=c.get(t, 'Client'), help="Client to list on. 
Default: %(default)s") - common.add_argument("--dbname", "-N", dest="dbname", default=c.get(t, 'DBName'), help="Name of the database file (Default: %(default)s)") - """ + Config.addPasswordOptions(common) + Config.addCommonOptions(common) create = argparse.ArgumentParser(add_help=False) create.add_argument('--schema', dest='schema', default=c.get(t, 'Schema'), help='Path to the schema to use (Default: %(default)s)') @@ -395,9 +392,6 @@ def parseArgs(): configValueParser.add_argument('--key', dest='key', choices=configKeys, required=True, help='Configuration key to set') configValueParser.add_argument('--value', dest='value', required=True, help='Configuration value to access') - Config.addPasswordOptions(common) - Config.addCommonOptions(common) - subs = parser.add_subparsers(help="Commands", dest='command') subs.add_parser('create', parents=[common, create], help='Create a client database') subs.add_parser('setpass', parents=[common], help='Set a password') @@ -415,7 +409,7 @@ def parseArgs(): parser.add_argument('--version', action='version', version='%(prog)s ' + Tardis.__versionstring__, help='Show the version') parser.add_argument('--help', '-h', action='help') - + args = parser.parse_args(remaining) # And load the required strength for new passwords. NOT specifiable on the command line. 
@@ -423,38 +417,38 @@ def parseArgs(): return args def getBackupSet(db, backup, date, defaultCurrent=False): - bsetInfo = None + bInfo = None if date: cal = parsedatetime.Calendar() (then, success) = cal.parse(date) if success: timestamp = time.mktime(then) logger.debug("Using time: %s", time.asctime(then)) - bsetInfo = db.getBackupSetInfoForTime(timestamp) - if bsetInfo and bsetInfo['backupset'] != 1: - bset = bsetInfo['backupset'] - logger.debug("Using backupset: %s %d", bsetInfo['name'], bsetInfo['backupset']) + bInfo = db.getBackupSetInfoForTime(timestamp) + if bInfo and bInfo['backupset'] != 1: + bset = bInfo['backupset'] + logger.debug("Using backupset: %s %d", bInfo['name'], bInfo['backupset']) else: logger.critical("No backupset at date: %s (%s)", date, time.asctime(then)) - bsetInfo = None + bInfo = None else: logger.critical("Could not parse date string: %s", date) elif backup: try: bset = int(backup) logger.debug("Using integer value: %d", bset) - bsetInfo = db.getBackupSetInfoById(bset) + bInfo = db.getBackupSetInfoById(bset) except ValueError: logger.debug("Using string value: %s", backup) if backup == current: - bsetInfo = db.lastBackupSet() + bInfo = db.lastBackupSet() else: - bsetInfo = db.getBackupSetInfo(backup) - if not bsetInfo: + bInfo = db.getBackupSetInfo(backup) + if not bInfo: logger.critical("No backupset at for name: %s", backup) elif defaultCurrent: - bsetInfo = db.lastBackupSet() - return bsetInfo + bInfo = db.lastBackupSet() + return bInfo def checkPasswordStrength(password): strength, improvements = passwordmeter.test(password) @@ -511,7 +505,7 @@ def main(): if not checkPasswordStrength(newpw): return -1 - if args.newpw == True: + if args.newpw is True: newpw2 = Util.getPassword(args.newpw, args.newpwf, args.newpwp, prompt="New Password for %s: " % (args.client), allowNone=False) if newpw2 != newpw: logger.error("Passwords don't match") @@ -533,26 +527,19 @@ def main(): if args.command == 'keys': return moveKeys(db, crypt) - - if 
args.command == 'list': - return listBSets(db, crypt) - - if args.command == 'info': - return bsetInfo(db, crypt) - - if args.command == 'purge': - return purge(db, cache, crypt) - - if args.command == 'delete': + elif args.command == 'list': + return listBSets(db) + elif args.command == 'info': + return bsetInfo(db) + elif args.command == 'purge': + return purge(db, cache) + elif args.command == 'delete': return deleteBsets(db, cache) - - if args.command == 'getconfig': + elif args.command == 'getconfig': return getConfig(db) - - if args.command == 'setconfig': + elif args.command == 'setconfig': return setConfig(db) - - if args.command == 'orphans': + elif args.command == 'orphans': return removeOrphans(db, cache) except KeyboardInterrupt: pass diff --git a/Tardis/TardisCrypto.py b/Tardis/TardisCrypto.py index 18c8f1d..3885afb 100644 --- a/Tardis/TardisCrypto.py +++ b/Tardis/TardisCrypto.py @@ -28,10 +28,6 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
-from Cryptodome.Cipher import AES, Blowfish -from Cryptodome.Protocol.KDF import PBKDF2 -import Cryptodome.Random -import socket import hashlib import hmac import os @@ -39,9 +35,13 @@ import base64 import binascii -import Defaults +from Cryptodome.Cipher import AES +from Cryptodome.Protocol.KDF import PBKDF2 +import Cryptodome.Random -class TardisCrypto: +import Tardis.Defaults as Defaults + +class TardisCrypto(object): _contentKey = None _filenameKey = None _tokenKey = None @@ -56,7 +56,7 @@ class TardisCrypto: def __init__(self, password, client=None): self._random = Cryptodome.Random.new() - if client == None: + if client is None: client = Defaults.getDefault('TARDIS_CLIENT') self.client = client @@ -66,8 +66,7 @@ def __init__(self, password, client=None): self._tokenKey = keys[self._keysize:] # And the other one def getContentCipher(self, iv): - cipher = AES.new(self._contentKey, AES.MODE_CBC, IV=iv) - return cipher + return AES.new(self._contentKey, AES.MODE_CBC, IV=iv) def getFilenameCipher(self): #cipher = AES.new(self._filenameKey, AES.MODE_ECB) @@ -77,8 +76,7 @@ def getHash(self, func=hashlib.md5): return hmac.new(self._contentKey, digestmod=func) def getIV(self): - iv = self._random.read(self.ivLength) - return iv + return self._random.read(self.ivLength) def pad(self, data, length=None): if length is None: @@ -87,7 +85,7 @@ def pad(self, data, length=None): data += chr(pad) * pad return data - def unpad(self, data, validate=True): + def unpad(self, data): #if validate: #self.checkpad(data) l = ord(data[-1]) @@ -140,7 +138,7 @@ def decryptFilename(self, name): def createToken(self, client=None): if client is None: - client = self.client + client = self.client cipher = AES.new(self._tokenKey, AES.MODE_ECB) token = base64.b64encode(cipher.encrypt(self.padzero(client)), self._altchars) return token @@ -169,55 +167,3 @@ def setOldStyleKeys(self): self._contentKey = self._tokenKey self._filenameKey = self._keyKey self._filenameEnc = 
AES.new(self._filenameKey, AES.MODE_ECB) - -if __name__ == "__main__": - enc = TardisCrypto("I've got a password, do you?") - dec = TardisCrypto("I've got a password, do you?") - - print enc.createToken() - print dec.createToken() - - enc.genKeys() - (a, b) = enc.getKeys() - print "Keys: ", a, b - dec.setKeys(a, b) - - #print base64.b64encode(enc._filenameKey) - #print base64.b64encode(enc._contentKey) - - iv = enc.getIV() - cc = enc.getContentCipher(iv) - - fc = enc.getFilenameCipher() - - print "---- Paths" - a = enc.encryptPath('a/b/c/d/e') - b = enc.encryptPath('/srv/music/MP3/CD/Classical/Bartók,_Béla_&_Kodaly,_Zoltan/Bartok_-_The_Miraculous_Mandarin_Kodály_-_Háry_Janos_Dances_Of_Galánta/02.Háry_János,_suite_from_the_opera_for_orchestra,_Prelude.mp3') - c = enc.encryptPath(os.path.join('a' * 16, 'b' * 32, 'c' * 48, 'd' * 64, 'e' * 80, 'f' * 96, 'g' * 112)) - print "1", a - print "2", b - print "3", c - - print "1", dec.decryptPath(a) - print "2", dec.decryptPath(b) - print "3", dec.decryptPath(c) - - print "---- Names" - a = enc.encryptFilename("srv") - print a - print dec.decryptFilename(a) - - print "---- More Names" - b = enc.encryptFilename('02.Háry_János,_suite_from_the_opera_for_orchestra,_Prelude.mp3') - print b - print dec.decryptFilename(b) - - print "---- Data" - pt = "This is a test. This is only a test. This is a test of the Emergency Broadcasting System. Had this been an actual emergency, the attention signal you just heard" - iv = enc.getIV() - cipher = enc.getContentCipher(iv) - ct = cipher.encrypt(enc.pad(pt)) - - decipher = dec.getContentCipher(iv) - dt = decipher.decrypt(ct) - print dt diff --git a/Tardis/TardisDB.py b/Tardis/TardisDB.py index c8f6ab9..865ab59 100644 --- a/Tardis/TardisDB.py +++ b/Tardis/TardisDB.py @@ -29,19 +29,17 @@ # POSSIBILITY OF SUCH DAMAGE. 
import sqlite3 -import shutil import logging -import os, os.path -import functools +import os +import os.path import time import hashlib import sys -import Tardis -import array import uuid -import ConnIdLogAdapter -import Rotator +import Tardis +import Tardis.ConnIdLogAdapter as ConnIdLogAdapter +import Tardis.Rotator as Rotator # Utility functions @@ -98,10 +96,10 @@ class TardisDB(object): chunksize = 1000 journal = None - def __init__(self, dbname, backup=False, prevSet=None, initialize=None, connid=None, token=None, user=-1, group=-1, chunksize=1000, numbackups=2, journal=None, clientId=None): + def __init__(self, dbname, backup=False, prevSet=None, initialize=None, connid=None, token=None, user=-1, group=-1, chunksize=1000, numbackups=2, journal=None): """ Initialize the connection to a per-machine Tardis Database""" self.logger = logging.getLogger("DB") - self.logger.debug("Initializing connection to {}".format(dbname)) + self.logger.debug("Initializing connection to %s", dbname) self.dbName = dbname self.chunksize = chunksize @@ -121,18 +119,18 @@ def __init__(self, dbname, backup=False, prevSet=None, initialize=None, connid=N self.conn = conn self.cursor = self.conn.cursor() - if (initialize): - self.logger.info("Creating database from schema: {}".format(initialize)) + if initialize: + self.logger.info("Creating database from schema: %s", initialize) try: with open(initialize, "r") as f: script = f.read() self.conn.executescript(script) except IOError as e: - self.logger.error("Could not read initialization script {}".format(initialize)) + self.logger.error("Could not read initialization script %s", initialize) self.logger.exception(e) raise except sqlite3.Error as e: - self.logger.error("Could not execute initialization script {}".format(initialize)) + self.logger.error("Could not execute initialization script %s", initialize) self.logger.exception(e) raise if token: @@ -153,7 +151,7 @@ def __init__(self, dbname, backup=False, prevSet=None, 
initialize=None, connid=N self.logger.error("Schema version mismatch: Database %s is %d: Expected %d. Please convert", dbname, int(version), _schemaVersion) raise Exception("Schema version mismatch: Database {} is {}: Expected {}. Please convert".format(dbname, version, _schemaVersion)) - if (prevSet): + if prevSet: f = self.getBackupSetInfo(prevSet) if f: self.prevBackupSet = f['backupset'] @@ -171,10 +169,7 @@ def __init__(self, dbname, backup=False, prevSet=None, initialize=None, connid=N self.clientId = self.getConfigValue('ClientID') - #row = self.cursor.fetchone() - #self.prevBackupName = row[0] - #self.prevBackupSet = row[1] - self.logger.debug("Last Backup Set: {} {} ".format(self.prevBackupName, self.prevBackupSet)) + self.logger.debug("Last Backup Set: %s %d ", self.prevBackupName, self.prevBackupSet) self.conn.commit() @@ -202,11 +197,11 @@ def lastBackupSet(self, completed=True): """ Select the last backup set. """ if completed: c = self.cursor.execute("SELECT " + - _backupSetInfoFields + + _backupSetInfoFields + "FROM Backups WHERE Completed = 1 ORDER BY BackupSet DESC LIMIT 1") else: c = self.cursor.execute("SELECT " + - _backupSetInfoFields + + _backupSetInfoFields + "FROM Backups ORDER BY BackupSet DESC LIMIT 1") row = c.fetchone() return row @@ -234,8 +229,8 @@ def newBackupSet(self, name, session, priority, clienttime, version=None, ip=Non {"name": name, "now": now, "session": session, "priority": priority, "full": full, "clienttime": clienttime, "clientversion": version, "clientip": ip, "serversessionid": serverID, - "serverversion": (Tardis.__buildversion__ or Tardis.__version)}) - except sqlite3.IntegrityError as e: + "serverversion": (Tardis.__buildversion__ or Tardis.__version__)}) + except sqlite3.IntegrityError: raise Exception("Backupset {} already exists".format(name)) self.currBackupSet = c.lastrowid @@ -252,9 +247,9 @@ def setBackupSetName(self, name, priority, current=True): backupset = self._bset(current) try: 
self.conn.execute("UPDATE Backups SET Name = :name, Priority = :priority WHERE BackupSet = :backupset", - {"name": name, "priority": priority, "backupset": backupset}) + {"name": name, "priority": priority, "backupset": backupset}) return True - except sqlite3.IntegrityError as e: + except sqlite3.IntegrityError: return False def setClientConfig(self, config, current=True): @@ -273,7 +268,7 @@ def checkBackupSetName(self, name): c = self.conn.execute("SELECT COUNT(*) FROM Backups WHERE Name = :name", { "name": name }) row = c.fetchone() - return True if row[0] == 0 else False; + return True if row[0] == 0 else False def getFileInfoByName(self, name, parent, current=True): """ Lookup a file in a directory in the previous backup set""" @@ -318,7 +313,7 @@ def getFileInfoByPath(self, path, current=False, permchecker=None): def getFileInfoByPathForRange(self, path, first, last, permchecker=None): sets = self._execute('SELECT BackupSet FROM Backups WHERE BackupSet BETWEEN :first AND :last ORDER BY BackupSet ASC', {'first': first, 'last': last}) for row in sets.fetchall(): - yield (row[0], self.getFileInfoByPath(path, row[0], permchecker)) + yield (row[0], self.getFileInfoByPath(path, row[0], permchecker)) def getFileInfoForPath(self, path, current=False): """ Return the FileInfo structures for each file along a path """ @@ -354,7 +349,7 @@ def getFileInfoBySimilar(self, fileInfo, current=False): self.logger.debug("Looking up file for similar info: %s", fileInfo) temp = fileInfo.copy() temp["backup"] = backupset - c = self.cursor.execute("SELECT " + + c = self.cursor.execute("SELECT " + _fileInfoFields + _fileInfoJoin + "WHERE Inode = :inode AND Device = :dev AND Mtime = :mtime AND C1.Size = :size AND " ":backup BETWEEN Files.FirstSet AND Files.LastSet", @@ -386,14 +381,6 @@ def getFileInfoByInodeFromPartial(self, inode): return c.fetchone() - """ - def copyChecksum(self, old_inode, new_inode): - self.cursor.execute("UPDATE Files SET ChecksumId = (SELECT CheckSumID FROM 
Files WHERE Inode = :oldInode AND BackupSet = :prev) " - "WHERE INode = :newInode AND BackupSet = :backup", - {"oldInode": old_inode, "newInode": new_inode, "prev": self.prevBackupSet, "backup": self.currBackupSet}) - return self.cursor.rowcount - """ - def setChecksum(self, inode, device, checksum): self.cursor.execute("UPDATE Files SET ChecksumId = (SELECT ChecksumId FROM CheckSums WHERE CheckSum = :checksum) " "WHERE Inode = :inode AND Device = :device AND " @@ -435,12 +422,12 @@ def getChecksumByName(self, name, parent, current=False): (inode, device) = parent self.logger.debug("Looking up checksum for file %s (%d %d) in %d", name, inode, device, backupset) c = self._execute("SELECT CheckSums.CheckSum AS checksum " - "FROM Files " - "JOIN Names ON Files.NameID = Names.NameId " - "JOIN CheckSums ON Files.ChecksumId = CheckSums.ChecksumId " - "WHERE Names.Name = :name AND Files.Parent = :parent AND ParentDev = :parentDev AND " - ":backup BETWEEN Files.FirstSet AND Files.LastSet", - { "name": name, "parent": inode, "parentDev": device, "backup": backupset }) + "FROM Files " + "JOIN Names ON Files.NameID = Names.NameId " + "JOIN CheckSums ON Files.ChecksumId = CheckSums.ChecksumId " + "WHERE Names.Name = :name AND Files.Parent = :parent AND ParentDev = :parentDev AND " + ":backup BETWEEN Files.FirstSet AND Files.LastSet", + { "name": name, "parent": inode, "parentDev": device, "backup": backupset }) row = c.fetchone() return row[0] if row else None #if row: return row[0] else: return None @@ -490,28 +477,28 @@ def insertFile(self, fileInfo, parent): temp = _addFields(fields, fileInfo) self.setNameID([temp]) self._execute("INSERT INTO Files " - "(NameId, FirstSet, LastSet, Inode, Device, Parent, ParentDev, Dir, Link, MTime, CTime, ATime, Mode, UID, GID, NLinks) " - "VALUES " - "(:nameid, :backup, :backup, :inode, :dev, :parent, :parentDev, :dir, :link, :mtime, :ctime, :atime, :mode, :uid, :gid, :nlinks)", - temp) + "(NameId, FirstSet, LastSet, Inode, Device, Parent, 
ParentDev, Dir, Link, MTime, CTime, ATime, Mode, UID, GID, NLinks) " + "VALUES " + "(:nameid, :backup, :backup, :inode, :dev, :parent, :parentDev, :dir, :link, :mtime, :ctime, :atime, :mode, :uid, :gid, :nlinks)", + temp) def updateDirChecksum(self, directory, cksid, current=True): bset = self._bset(current) (inode, device) = directory self._execute("UPDATE FILES " - "SET ChecksumID = :cksid " - "WHERE Inode = :inode AND DEVICE = :device AND :bset BETWEEN FirstSet AND LastSet", - {"inode": inode, "device": device, "cksid": cksid, "bset": bset}) + "SET ChecksumID = :cksid " + "WHERE Inode = :inode AND DEVICE = :device AND :bset BETWEEN FirstSet AND LastSet", + {"inode": inode, "device": device, "cksid": cksid, "bset": bset}) def extendFile(self, parent, name, old=False, current=True): old = self._bset(old) current = self._bset(current) (parIno, parDev) = parent cursor = self._execute("UPDATE FILES " - "SET LastSet = :new " - "WHERE Parent = :parent AND ParentDev = :parentDev AND NameID = (SELECT NameID FROM Names WHERE Name = :name) AND " - ":old BETWEEN FirstSet AND LastSet", - { "parent": parIno, "parentDev": parDev , "name": name, "old": old, "new": current }) + "SET LastSet = :new " + "WHERE Parent = :parent AND ParentDev = :parentDev AND NameID = (SELECT NameID FROM Names WHERE Name = :name) AND " + ":old BETWEEN FirstSet AND LastSet", + { "parent": parIno, "parentDev": parDev , "name": name, "old": old, "new": current }) return cursor.rowcount def extendFileInode(self, parent, inode, old=False, current=True): @@ -521,10 +508,10 @@ def extendFileInode(self, parent, inode, old=False, current=True): (ino, dev) = inode #self.logger.debug("ExtendFileInode: %s %s %s %s", parent, inode, current, old) cursor = self._execute("UPDATE FILES " - "SET LastSet = :new " - "WHERE Parent = :parent AND ParentDev = :parentDev AND Inode = :inode AND Device = :device AND " - ":old BETWEEN FirstSet AND LastSet", - { "parent": parIno, "parentDev": parDev , "inode": ino, "device": 
dev, "old": old, "new": current }) + "SET LastSet = :new " + "WHERE Parent = :parent AND ParentDev = :parentDev AND Inode = :inode AND Device = :device AND " + ":old BETWEEN FirstSet AND LastSet", + { "parent": parIno, "parentDev": parDev , "inode": ino, "device": dev, "old": old, "new": current }) return cursor.rowcount def cloneDir(self, parent, new=True, old=False): @@ -533,10 +520,10 @@ def cloneDir(self, parent, new=True, old=False): (parIno, parDev) = parent self.logger.debug("Cloning directory inode %d, %d from %d to %d", parIno, parDev, oldBSet, newBSet) cursor = self._execute("UPDATE FILES " - "SET LastSet = :new " - "WHERE Parent = :parent AND ParentDev = :parentDev AND " - ":old BETWEEN FirstSet AND LastSet", - { "new": newBSet, "old": oldBSet, "parent": parIno, "parentDev": parDev }) + "SET LastSet = :new " + "WHERE Parent = :parent AND ParentDev = :parentDev AND " + ":old BETWEEN FirstSet AND LastSet", + { "new": newBSet, "old": oldBSet, "parent": parIno, "parentDev": parDev }) return cursor.rowcount def setNameID(self, files): @@ -582,10 +569,10 @@ def updateChecksumFile(self, checksum, encrypted=False, size=0, basis=None, delt def getChecksumInfo(self, checksum): self.logger.debug("Getting checksum info on: %s", checksum) c = self._execute("SELECT " - "Checksum AS checksum, ChecksumID AS checksumid, Basis AS basis, Encrypted AS encrypted, " - "Size AS size, DeltaSize AS deltasize, DiskSize AS disksize, IsFile AS isfile, Compressed AS compressed, ChainLength AS chainlength " - "FROM Checksums WHERE CheckSum = :checksum", - {"checksum": checksum}) + "Checksum AS checksum, ChecksumID AS checksumid, Basis AS basis, Encrypted AS encrypted, " + "Size AS size, DeltaSize AS deltasize, DiskSize AS disksize, IsFile AS isfile, Compressed AS compressed, ChainLength AS chainlength " + "FROM Checksums WHERE CheckSum = :checksum", + {"checksum": checksum}) row = c.fetchone() if row: return row @@ -629,7 +616,7 @@ def getChainLength(self, checksum): Could do this, 
but not all versions of SQLite3 seem to support "WITH RECURSIVE" statements c = self._execute("WITH RECURSIVE x(n) AS (VALUES(:checksum) UNION SELECT Basis FROM Checksums, x WHERE x.n=Checksums.Checksum) " "SELECT COUNT(*) FROM Checksums WHERE Checksum IN x", - {"checksum": checksum}); + {"checksum": checksum}) r = c.fetchone() if r: return int(r[0]) @@ -643,10 +630,10 @@ def readDirectory(self, dirNode, current=False): #self.logger.debug("Reading directory values for (%d, %d) %d", inode, device, backupset) c = self._execute("SELECT " + _fileInfoFields + ", C1.Basis AS basis, C1.Encrypted AS encrypted " + - _fileInfoJoin + - "WHERE Parent = :parent AND ParentDev = :parentDev AND " - ":backup BETWEEN Files.FirstSet AND Files.LastSet", - {"parent": inode, "parentDev": device, "backup": backupset}) + _fileInfoJoin + + "WHERE Parent = :parent AND ParentDev = :parentDev AND " + ":backup BETWEEN Files.FirstSet AND Files.LastSet", + {"parent": inode, "parentDev": device, "backup": backupset}) return _fetchEm(c) #while True: # batch = c.fetchmany(self.chunksize) @@ -687,7 +674,7 @@ def readDirectoryForRange(self, dirNode, first, last): (inode, device) = dirNode #self.logger.debug("Reading directory values for (%d, %d) in range (%d, %d)", inode, device, first, last) c = self._execute("SELECT " + _fileInfoFields + ", " - "C1.Basis AS basis, C1.Encrypted AS encrypted " + + "C1.Basis AS basis, C1.Encrypted AS encrypted " + _fileInfoJoin + "WHERE Parent = :parent AND ParentDev = :parentDev AND " "Files.LastSet >= :first AND Files.FirstSet <= :last", @@ -703,9 +690,9 @@ def listBackupSets(self): #self.logger.debug("list backup sets") # "Name AS name, BackupSet AS backupset " c = self._execute("SELECT " + - _backupSetInfoFields + - "FROM Backups " - "ORDER BY backupset ASC", {}) + _backupSetInfoFields + + "FROM Backups " + "ORDER BY backupset ASC", {}) while True: batch = c.fetchmany(self.chunksize) if not batch: @@ -714,26 +701,26 @@ def listBackupSets(self): yield row def 
getBackupSetInfoById(self, bset): - c = self._execute("SELECT " + - _backupSetInfoFields + - "FROM Backups WHERE BackupSet = :bset", - { "bset": bset }) + c = self._execute("SELECT " + + _backupSetInfoFields + + "FROM Backups WHERE BackupSet = :bset", + { "bset": bset }) row = c.fetchone() return row def getBackupSetInfo(self, name): - c = self._execute("SELECT " + - _backupSetInfoFields + - "FROM Backups WHERE Name = :name", - { "name": name }) + c = self._execute("SELECT " + + _backupSetInfoFields + + "FROM Backups WHERE Name = :name", + { "name": name }) row = c.fetchone() return row def getBackupSetInfoForTime(self, time): - c = self._execute("SELECT " + - _backupSetInfoFields + - "FROM Backups WHERE BackupSet = (SELECT MAX(BackupSet) FROM Backups WHERE StartTime <= :time)", - { "time": time }) + c = self._execute("SELECT " + + _backupSetInfoFields + + "FROM Backups WHERE BackupSet = (SELECT MAX(BackupSet) FROM Backups WHERE StartTime <= :time)", + { "time": time }) row = c.fetchone() return row @@ -755,8 +742,8 @@ def getBackupSetDetails(self, bset): self.logger.debug("PrevSet: %s, NextSet: %s", prevSet, nextSet) # Count of files that first appeared in this version. 
May be delta's row = self._executeWithResult("SELECT COUNT(*), SUM(Size), SUM(DiskSize) FROM Files JOIN Checksums ON Files.ChecksumID = Checksums.ChecksumID " - "WHERE Dir = 0 AND FirstSet > :prevSet", - {'prevSet': prevSet}) + "WHERE Dir = 0 AND FirstSet > :prevSet", + {'prevSet': prevSet}) newFiles = row[0] if row[0] else 0 newSize = row[1] if row[1] else 0 newSpace = row[2] if row[2] else 0 @@ -764,8 +751,8 @@ def getBackupSetDetails(self, bset): # Count of files that are last seen in this set, and are not part of somebody else's basis row = self._executeWithResult("SELECT COUNT(*), SUM(Size), SUM(DiskSize) FROM Files JOIN Checksums ON Files.ChecksumID = Checksums.ChecksumID " "WHERE Dir = 0 AND LastSet < :nextSet " - "AND Checksum NOT IN (SELECT Basis FROM Checksums WHERE Basis IS NOT NULL)", - {'nextSet': nextSet}) + "AND Checksum NOT IN (SELECT Basis FROM Checksums WHERE Basis IS NOT NULL)", + {'nextSet': nextSet}) endFiles = row[0] if row[0] else 0 endSize = row[1] if row[1] else 0 endSpace = row[2] if row[2] else 0 @@ -774,9 +761,8 @@ def getBackupSetDetails(self, bset): def setStats(self, newFiles, deltaFiles, bytesReceived, current=True): bset = self._bset(current) - c = self._execute("UPDATE Backups SET FilesFull = :full, FilesDelta = :delta, BytesReceived = :bytes WHERE BackupSet = :bset", - {"bset": bset, "full": newFiles, "delta": deltaFiles, "bytes": bytesReceived}) - + self._execute("UPDATE Backups SET FilesFull = :full, FilesDelta = :delta, BytesReceived = :bytes WHERE BackupSet = :bset", + {"bset": bset, "full": newFiles, "delta": deltaFiles, "bytes": bytesReceived}) def getConfigValue(self, key): c = self._execute("SELECT Value FROM Config WHERE Key = :key", {'key': key }) @@ -784,10 +770,10 @@ def getConfigValue(self, key): return row[0] if row else None def setConfigValue(self, key, value): - c = self._execute("INSERT OR REPLACE INTO Config (Key, Value) VALUES(:key, :value)", {'key': key, 'value': value}) + self._execute("INSERT OR REPLACE INTO 
Config (Key, Value) VALUES(:key, :value)", {'key': key, 'value': value}) def delConfigValue(self, key): - c = self._execute("DELETE FROM Config WHERE Key = :key", {'key': key}) + self._execute("DELETE FROM Config WHERE Key = :key", {'key': key}) def getToken(self): return self.getConfigValue('Token') @@ -803,10 +789,7 @@ def checkToken(self, token): s = hashlib.sha1() s.update(token) tokenhash = s.hexdigest() - if dbToken == tokenhash: - return True - else: - return False + return (dbToken == tokenhash) def setKeys(self, token, filenameKey, contentKey): try: @@ -847,9 +830,9 @@ def listPurgeSets(self, priority, timestamp, current=False): backupset = self._bset(current) # Select all sets that are purgeable. c = self.cursor.execute("SELECT " + _backupSetInfoFields + " FROM Backups WHERE Priority <= :priority AND EndTime <= :timestamp AND BackupSet < :backupset", - {"priority": priority, "timestamp": str(timestamp), "backupset": backupset}) + {"priority": priority, "timestamp": str(timestamp), "backupset": backupset}) for row in c: - yield(row) + yield row def listPurgeIncomplete(self, priority, timestamp, current=False): backupset = self._bset(current) @@ -858,14 +841,14 @@ def listPurgeIncomplete(self, priority, timestamp, current=False): # If it comes from the HTTPInterface as a string, the <= timestamp doesn't seem to work. c = self.cursor.execute("SELECT " + _backupSetInfoFields + " FROM Backups WHERE Priority <= :priority AND COALESCE(EndTime, StartTime) <= :timestamp AND BackupSet < :backupset AND Completed = 0", - {"priority": priority, "timestamp": str(timestamp), "backupset": backupset}) + {"priority": priority, "timestamp": str(timestamp), "backupset": backupset}) for row in c: - yield(row) + yield row def purgeSets(self, priority, timestamp, current=False): """ Purge old files from the database. 
Needs to be followed up with calls to remove the orphaned files """ backupset = self._bset(current) - self.logger.debug("Purging backupsets below priority {}, before {}, and backupset: {}".format(priority, timestamp, backupset)) + self.logger.debug("Purging backupsets below priority %d, before %s, and backupset: %d", priority, timestamp, backupset) # First, purge out the backupsets that don't match self.cursor.execute("DELETE FROM Backups WHERE Priority <= :priority AND EndTime <= :timestamp AND BackupSet < :backupset", {"priority": priority, "timestamp": str(timestamp), "backupset": backupset}) @@ -878,7 +861,7 @@ def purgeSets(self, priority, timestamp, current=False): def purgeIncomplete(self, priority, timestamp, current=False): """ Purge old files from the database. Needs to be followed up with calls to remove the orphaned files """ backupset = self._bset(current) - self.logger.debug("Purging incomplete backupsets below priority {}, before {}, and backupset: {}".format(priority, timestamp, backupset)) + self.logger.debug("Purging incomplete backupsets below priority %d, before %s, and backupset: %d", priority, timestamp, backupset) # First, purge out the backupsets that don't match self.cursor.execute("DELETE FROM Backups WHERE Priority <= :priority AND COALESCE(EndTime, StartTime) <= :timestamp AND BackupSet < :backupset AND Completed = 0", {"priority": priority, "timestamp": str(timestamp), "backupset": backupset}) @@ -891,7 +874,7 @@ def purgeIncomplete(self, priority, timestamp, current=False): def deleteBackupSet(self, current=False): bset = self._bset(current) - self.cursor.execute("DELETE FROM Backups WHERE BackupSet = :backupset", {"backupset": bset}); + self.cursor.execute("DELETE FROM Backups WHERE BackupSet = :backupset", {"backupset": bset}) # TODO: Move this to the removeOrphans phase # Then delete the files which are no longer referenced filesDeleted = self._purgeFiles() @@ -914,19 +897,19 @@ def listOrphanChecksums(self, isFile): yield row[0] 
def deleteOrphanChecksums(self, isFile): - c = self.cursor.execute("DELETE FROM Checksums " - "WHERE ChecksumID NOT IN (SELECT DISTINCT(ChecksumID) FROM Files WHERE ChecksumID IS NOT NULL) " - "AND ChecksumID NOT IN (SELECT DISTINCT(XattrId) FROM Files WHERE XattrID IS NOT NULL) " - "AND ChecksumID NOT IN (SELECT DISTINCT(AclId) FROM Files WHERE AclId IS NOT NULL) " - "AND Checksum NOT IN (SELECT DISTINCT(Basis) FROM Checksums WHERE Basis IS NOT NULL) " - "AND IsFile = :isfile", - { 'isfile': int(isFile)} ) + self.cursor.execute("DELETE FROM Checksums " + "WHERE ChecksumID NOT IN (SELECT DISTINCT(ChecksumID) FROM Files WHERE ChecksumID IS NOT NULL) " + "AND ChecksumID NOT IN (SELECT DISTINCT(XattrId) FROM Files WHERE XattrID IS NOT NULL) " + "AND ChecksumID NOT IN (SELECT DISTINCT(AclId) FROM Files WHERE AclId IS NOT NULL) " + "AND Checksum NOT IN (SELECT DISTINCT(Basis) FROM Checksums WHERE Basis IS NOT NULL) " + "AND IsFile = :isfile", + { 'isfile': int(isFile)} ) return self.cursor.rowcount def compact(self): self.logger.debug("Removing unused names") # Purge out any unused names - c = self.conn.execute("DELETE FROM Names WHERE NameID NOT IN (SELECT NameID FROM Files)"); + self.conn.execute("DELETE FROM Names WHERE NameID NOT IN (SELECT NameID FROM Files)") # Check if we've hit an interval where we want to do a vacuum bset = self._bset(True) @@ -934,21 +917,21 @@ def compact(self): if interval and bset % int(interval): self.logger.debug("Vaccuuming database") # And clean up the database - c = self.conn.execute("VACUUM") + self.conn.execute("VACUUM") def deleteChecksum(self, checksum): self.logger.debug("Deleting checksum: %s", checksum) - c = self.cursor.execute("DELETE FROM Checksums WHERE Checksum = :checksum", {"checksum": checksum}) + self.cursor.execute("DELETE FROM Checksums WHERE Checksum = :checksum", {"checksum": checksum}) return self.cursor.rowcount def commit(self): self.conn.commit() def close(self, completeBackup=False): - 
self.logger.debug("Closing DB: {}".format(self.dbName)) + self.logger.debug("Closing DB: %s", self.dbName) if self.currBackupSet: self.conn.execute("UPDATE Backups SET EndTime = :now WHERE BackupSet = :backup", - { "now": time.time(), "backup": self.currBackupSet }) + { "now": time.time(), "backup": self.currBackupSet }) self.conn.commit() self.conn.close() self.conn = None @@ -966,13 +949,11 @@ def __del__(self): self.close() if __name__ == "__main__": - import sys - import uuid - x = TardisDB(sys.argv[1]) - x.newBackupSet(sys.argv[2], str(uuid.uuid1())) - rec = x.getFileInfoByName("File1", 1) + db = TardisDB(sys.argv[1]) + db.newBackupSet(sys.argv[2], str(uuid.uuid1())) + rec = db.getFileInfoByName("File1", 1) print rec - print x.getFileInfoByInode(2) + print db.getFileInfoByInode(2) info = { "name" : "Dir", "inode" : 1, @@ -986,7 +967,7 @@ def __del__(self): "gid" : 100, "cksum" : None } - x.insertFile(info) + db.insertFile(info) info = { "name" : "File1", "inode" : 2, @@ -1000,6 +981,6 @@ def __del__(self): "gid" : 100, "cksum" : None } - x.insertFile(info) - x.completeBackup() - x.commit() + db.insertFile(info) + db.completeBackup() + db.commit() diff --git a/Tardis/TardisFS.py b/Tardis/TardisFS.py index 1aca575..bb84114 100644 --- a/Tardis/TardisFS.py +++ b/Tardis/TardisFS.py @@ -29,33 +29,29 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
-import fuse - -fuse.fuse_python_api = (0, 2) -from time import time - -import stat # for file properties import os # for filesystem modes (O_RDONLY, etc) +import os.path import errno # for error number codes (ENOENT, etc) # - note: these must be returned as negatives import sys -import os.path import logging import tempfile -import socket import json import base64 +import time +import stat # for file properties + +import fuse import Tardis -import TardisDB -import RemoteDB -import CacheDir -import Regenerator -import TardisCrypto -import Util -import Cache -import Defaults +import Tardis.CacheDir as CacheDir +import Tardis.Regenerator as Regenerator +import Tardis.Util as Util +import Tardis.Cache as Cache +import Tardis.Defaults as Defaults + +fuse.fuse_python_api = (0, 2) _BackupSetInfo = 0 _LastBackupSet = 1 @@ -102,7 +98,11 @@ def getParts(path): class TardisFS(fuse.Fuse): """ + FUSE filesystem to read data from a Tardis Backup Database """ + # Disable pylint complaints about "could me a function" and "unused argument" as lots of required FUSE functions + # just return "read-only FS" status + # pragma pylint: disable=no-self-use,unused-argument backupsets = {} dirInfo = {} fsencoding = sys.getfilesystemencoding() @@ -185,6 +185,10 @@ def __init__(self, *args, **kw): self.regenerator = Regenerator.Regenerator(self.cacheDir, self.tardis, crypt=self.crypt) self.files = {} + # Fuse variables + self.flags = 0 + self.multithreaded = 0 + except Exception as e: self.log.exception(e) sys.exit(2) @@ -198,7 +202,7 @@ def fsEncodeName(self, name): else: return name.encode(self.fsencoding) - def getBackupSetInfo(self, b, requestTime = None): + def getBackupSetInfo(self, b): key = (_BackupSetInfo, b) info = self.cache.retrieve(key) if info: @@ -242,7 +246,7 @@ def getDirInfo(self, path): if info: self.cache.insert(key, info) return info - + def getFileInfoByPath(self, path): #self.log.info("getFileInfoByPath: %s", path) @@ -325,7 +329,7 @@ def getattr(self, path): # Root 
directory contents lead = getParts(path) st = fuse.Stat() - if (lead[0] == self.current): + if lead[0] == self.current: target = self.lastBackupSet(True) timestamp = float(target['endtime']) st.st_mode = stat.S_IFLNK | 0755 @@ -380,7 +384,7 @@ def getattr(self, path): @tracer - def getdir(self, path): + def getdir(self, _): """ return: [[('file1', 0), ('file2', 0), ... ]] """ @@ -390,7 +394,6 @@ def getdir(self, path): @tracer def readdir(self, path, offset): #self.log.info("CALL readdir %s Offset: %d", path, offset) - inodes = {} parent = None path = self.fsEncodeName(path) @@ -416,8 +419,8 @@ def readdir(self, path, offset): #entries = self.decryptNames(entries) # For each entry, cache it, so a later getattr() call can use it. - # Get attr will typically be called promptly after a call to - now = time() + # Get attr will typically be called promptly after a call to + now = time.time() for e in entries: name = e['name'] if self.crypt: @@ -478,7 +481,7 @@ def open ( self, path, flags ): depth = getDepth(path) # depth of path, zero-based from root - if (depth < 2): + if depth < 2: return -errno.ENOENT # TODO: Lock this @@ -497,7 +500,7 @@ def open ( self, path, flags ): try: f.flush() f.seek(0) - except AttributeError, IOError: + except (AttributeError, IOError): bytesCopied = 0 self.log.debug("Copying file to tempfile") temp = tempfile.TemporaryFile() @@ -538,7 +541,7 @@ def readlink ( self, path ): return link if path == '/' + self.current: target = self.lastBackupSet(True) - self.log.debug("Path: {} Target: {} {}".format(path, target['name'], target['backupset'])) + self.log.debug("Path: %s Target: %s %s", path, target['name'], target['backupset']) link = str(target['name']) self.cache.insert(key, link) return link @@ -565,7 +568,7 @@ def release ( self, path, flags ): path = self.fsEncodeName(path) if self.files[path]: - self.files[path]["opens"] -= 1; + self.files[path]["opens"] -= 1 if self.files[path]["opens"] == 0: self.files[path]["file"].close() del 
self.files[path] @@ -602,7 +605,7 @@ def statfs ( self ): st.f_namemax = fs.f_namemax return st else: - return -errorno.EINVAL + return -errno.EINVAL def symlink ( self, targetPath, linkPath ): #self.log.info('CALL symlink {} {}'.format(path, linkPath)) @@ -635,19 +638,19 @@ def write ( self, path, buf, offset ): @tracer def listxattr ( self, path, size ): path = self.fsEncodeName(path) - self.log.info('CALL listxattr {} {}'.format(path, size)) + self.log.info('CALL listxattr %s %d', path, size) if size == 0: retFunc = lambda x: len("".join(x)) + len(str(x)) else: retFunc = lambda x: x - if (getDepth(path) == 1): + if getDepth(path) == 1: parts = getParts(path) b = self.getBackupSetInfo(parts[0]) if b: return retFunc(self.attrMap.keys()) - if (getDepth(path) > 1): + if getDepth(path) > 1: parts = getParts(path) b = self.getBackupSetInfo(parts[0]) if b: @@ -678,7 +681,7 @@ def getxattr (self, path, attr, size): if size == 0: retFunc = lambda x: len(str(x)) else: - retFunc = lambda x: str(x) + retFunc = str depth = getDepth(path) @@ -703,18 +706,18 @@ def getxattr (self, path, attr, size): if checksum: return retFunc(checksum) elif attr == 'user.tardis_since': - if b: + if b: since = self.tardis.getFirstBackupSet(subpath, b['backupset']) #self.log.debug(str(since)) if since: return retFunc(since) elif attr == 'user.tardis_chain': - info = self.tardis.getChecksumInfoByPath(subpath, b['backupset']) - #self.log.debug(str(checksum)) - if info: - chain = str(info['chainlength']) - self.log.debug(str(chain)) - return retFunc(chain) + info = self.tardis.getChecksumInfoByPath(subpath, b['backupset']) + #self.log.debug(str(checksum)) + if info: + chain = str(info['chainlength']) + self.log.debug(str(chain)) + return retFunc(chain) else: # Must be an imported value. Let's generate it. 
info = self.getFileInfoByPath(path) diff --git a/Tardis/Util.py b/Tardis/Util.py index e390af5..84dc76d 100644 --- a/Tardis/Util.py +++ b/Tardis/Util.py @@ -36,7 +36,6 @@ import subprocess import hashlib import shlex -import StringIO import getpass import stat import fnmatch @@ -45,21 +44,21 @@ import base64 from functools import partial -import Messages -import Connection -import CompressedBuffer -import Tardis -import Defaults - -import TardisDB -import TardisCrypto -import CacheDir -import RemoteDB - #import pycurl import urlparse import urllib +import Tardis.Connection as Connection +import Tardis.CompressedBuffer as CompressedBuffer +import Tardis.Defaults as Defaults + +import Tardis.TardisDB as TardisDB +import Tardis.TardisCrypto as TardisCrypto +import Tardis.CacheDir as CacheDir +import Tardis.RemoteDB as RemoteDB + + + logger = logging.getLogger('UTIL') def fmtSize(num, base=1024, formats = ['bytes','KB','MB','GB', 'TB', 'PB']): @@ -79,7 +78,7 @@ def getIntOrNone(config, section, name): try: x = config.get(section, name) return int(x, 0) - except: + except Exception: return None def stripComments(line): @@ -124,13 +123,13 @@ def shortPath(path, width=80): return "..." + os.sep + retPath -def accumulateStat(stats, stat, amount=1): +def accumulateStat(stats, name, amount=1): if stats: - stats[stat] = stats.setdefault(stat, 0) + amount + stats[name] = stats.setdefault(name, 0) + amount + + +# Functions for reducing a path. -""" -Functions for reducing a path. -""" def findDirInRoot(tardis, bset, path, crypt=None): #logger = logging.getLogger('UTIL') """ @@ -150,7 +149,7 @@ def findDirInRoot(tardis, bset, path, crypt=None): return i return None -def reducePath(tardis, bset, path, reduce, crypt=None): +def reducePath(tardis, bset, path, reduceBy, crypt=None): #logger = logging.getLogger('UTIL') """ Reduce a path by a specified number of directory levels. 
@@ -158,21 +157,18 @@ def reducePath(tardis, bset, path, reduce, crypt=None): element which occurs in the root directory. """ #logger.debug("Computing path for %s in %d (%d)", path, bset, reduce) - if reduce == sys.maxint: - reduce = findDirInRoot(tardis, bset, path, crypt) - if reduce: - #logger.debug("Reducing path by %d entries: %s", reduce, path) + if reduceBy == sys.maxint: + reduceBy = findDirInRoot(tardis, bset, path, crypt) + if reduceBy: + #logger.debug("Reducing path by %d entries: %s", reduceBy, path) comps = path.split(os.sep) - if reduce > len(comps): - #logger.error("Path reduction value (%d) greater than path length (%d) for %s. Skipping.", reduce, len(comps), path) + if reduceBy > len(comps): + #logger.error("Path reduction value (%d) greater than path length (%d) for %s. Skipping.", reduceBy, len(comps), path) return None - tmp = os.path.join(os.sep, *comps[reduce + 1:]) - #logger.info("Reduced path %s to %s", path, tmp) + tmp = os.path.join(os.sep, *comps[reduceBy + 1:]) + #logger.info("reduced path %s to %s", path, tmp) path = tmp - return path - -""" -""" + return path def isMagic(path): if ('*' in path) or ('?' in path) or ('[' in path): @@ -185,7 +181,7 @@ def matchPath(pattern, path): pats = pattern.split(os.sep) dirs = path.split(os.sep) inWild = False - while (len(pats) != 0 and len(dirs) != 0): + while len(pats) != 0 and len(dirs) != 0: if not inWild: p = pats.pop(0) d = dirs.pop(0) @@ -278,15 +274,14 @@ def getPassword(password, pwurl, pwprog, prompt='Password: ', allowNone=True): pwf.close() if pwprog: - args = shlex.split(pwprog) - output = subprocess.check_output(args) + a = shlex.split(pwprog) + output = subprocess.check_output(a) password = output.split('\n')[0].rstrip() return password -""" -Get the database, cachedir, and crypto object. -""" +# Get the database, cachedir, and crypto object. 
+ def setupDataConnection(dataLoc, client, password, keyFile, dbName, dbLoc=None): crypt = None if password: @@ -324,9 +319,9 @@ def setupDataConnection(dataLoc, client, password, keyFile, dbName, dbLoc=None): return (tardis, cache, crypt) -""" -Data manipulation functions -""" + +# Data manipulation functions + _suffixes = [".basis", ".sig", ".meta", ""] def _removeOrphans(db, cache): #logger = logging.getLogger('UTIL') @@ -350,9 +345,8 @@ def _removeOrphans(db, cache): cache.removeSuffixes(cksum, _suffixes) db.deleteChecksum(cksum) - except OSError as e: - logger.warning("No checksum file for checksum %s", c) - pass # Do something better here. + except OSError: + logger.warning("No checksum file for checksum %s", cksum) return count, size def removeOrphans(db, cache): @@ -362,7 +356,7 @@ def removeOrphans(db, cache): # Repeatedly prune the file trees until there are no more checksums # we have to do this, as there can be multiple levels of basis files, each dependant on the one above (below?) # Theoretically we should be able to do this is one go, but SQLite's implementation of recursive queries doesn't - # seem to work quite right. + # seem to work quite right. 
while True: (lCount, lSize) = _removeOrphans(db, cache) if lCount == 0: @@ -374,9 +368,7 @@ def removeOrphans(db, cache): db.deleteOrphanChecksums(False) return count, size, rounds -""" -Data transmission functions -""" +# Data transmission functions def _chunks(stream, chunksize): last = '' @@ -387,11 +379,12 @@ def _chunks(stream, chunksize): yield (last, True) def sendData(sender, data, encrypt=lambda x:x, pad=lambda x:x, chunksize=(16 * 1024), hasher=None, compress=False, stats=None, signature=False, hmac=None, iv=None, progress=None, progressPeriod=8*1024*1024): - """ Send a block of data, optionally encrypt and/or compress it before sending """ + """ + Send a block of data, optionally encrypt and/or compress it before sending + """ #logger = logging.getLogger('Data') if isinstance(sender, Connection.Connection): sender = sender.sender - num = 0 size = 0 status = "OK" ck = None @@ -532,11 +525,12 @@ def saveKeys(name, client, nameKey, contentKey): with open(name, 'wb') as configfile: config.write(configfile) -""" -Class to handle options of the form "--[no]argument" where you can specify --noargument to store a False, -or --argument to store a true. -""" + class StoreBoolean(argparse.Action): + """ + Class to handle options of the form "--[no]argument" where you can specify --noargument to store a False, + or --argument to store a true. 
+ """ def __init__(self, option_strings, dest, negate="no", nargs=0, **kwargs): if nargs is not 0: raise ValueError("nargs not allowed") @@ -547,18 +541,19 @@ def __init__(self, option_strings, dest, negate="no", nargs=0, **kwargs): option_strings.append(self.negative_option) super(StoreBoolean, self).__init__(option_strings, dest, nargs=0, **kwargs) - def __call__(self, parser, args, values, option_string=None): + def __call__(self, parser, arguments, values, option_string=None): #print "Here: ", option_string, " :: ", self.option_strings if option_string == self.negative_option: value = False else: value = True - setattr(args, self.dest, value) + setattr(arguments, self.dest, value) + -""" -Class to handle toggling options. -x = true -xx = false -xxx = true, etc -""" class Toggle(argparse.Action): + """ + Class to handle toggling options. -x = true -xx = false -xxx = true, etc + """ def __init__(self, option_strings, dest, @@ -577,10 +572,9 @@ def __call__(self, parser, namespace, values, option_string=None): new_value = not argparse._ensure_value(namespace, self.dest, False) setattr(namespace, self.dest, new_value) -""" -Help formatter to handle the StoreBoolean options. -Only handles overriding the basic HelpFormatter class. -""" +# Help formatter to handle the StoreBoolean options. +# Only handles overriding the basic HelpFormatter class. + class HelpFormatter(argparse.HelpFormatter): def _format_action_invocation(self, action): #print "_format_action_invocation", str(action) @@ -591,9 +585,8 @@ def _format_action_invocation(self, action): #print "Got ", ret return ret -""" -Argument formatter. Useful for converting our command line arguments into strings" -""" +# Argument formatter. 
Useful for converting our command line arguments into strings" + class ArgJsonEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, types.FileType): @@ -607,15 +600,14 @@ def default(self, obj): return json.JSONEncoder(self, obj) -""" -Class to have a two directional dictionary. -""" +# Class to have a two directional dictionary. + class bidict(dict): def __init__(self, *args, **kwargs): super(bidict, self).__init__(*args, **kwargs) self.inverse = {} for key, value in self.iteritems(): - self.inverse.setdefault(value,[]).append(key) + self.inverse.setdefault(value,[]).append(key) def __setitem__(self, key, value): super(bidict, self).__setitem__(key, value) @@ -623,25 +615,23 @@ def __setitem__(self, key, value): def __delitem__(self, key): self.inverse.setdefault(self[key],[]).remove(key) - if self[key] in self.inverse and not self.inverse[self[key]]: + if self[key] in self.inverse and not self.inverse[self[key]]: del self.inverse[self[key]] super(bidict, self).__delitem__(key) -""" -Get a hash function. Configurable. -""" +# Get a hash function. Configurable. + def getHash(crypt=None, doCrypt=True, func=hashlib.md5): if crypt and doCrypt: return crypt.getHash(func) else: return func() -""" -'Test' code -""" + +# 'Test' code if __name__ == "__main__": - p = argparse.ArgumentParser(formatter_class=MyHelpFormatter) + p = argparse.ArgumentParser(formatter_class=HelpFormatter) p.add_argument("--doit", action=StoreBoolean, help="Yo mama") p.add_argument("-x", action=Toggle, help="Whatever") diff --git a/Tardis/__init__.py b/Tardis/__init__.py index 80f4b49..19bfa6d 100644 --- a/Tardis/__init__.py +++ b/Tardis/__init__.py @@ -48,7 +48,7 @@ if __buildversion__: __versionstring__ = __version__ + ' (' + __buildversion__ + ')' -def __check_features(): +def check_features(): xattr_pkg = 'xattr' acl_pkg = 'pylibacl' os_info = os.uname() diff --git a/setup.py b/setup.py index 1812baf..2db4f97 100755 --- a/setup.py +++ b/setup.py @@ -1,24 +1,25 @@ #! 
/usr/bin/python -from setuptools import setup, find_packages -import sys, os +import os import subprocess +from setuptools import setup, find_packages + +import Tardis + longdesc = ''' This is a system for performing backups, supporting incremental, delta backups, with option encryption, and recovery of data via either a filesystem based interface, or via explicit tools. Please pardon any Dr. Who jokes. ''' - + buildVersion = subprocess.check_output(['git', 'describe', '--dirty', '--tags', '--always']).strip() file('tardisversion', 'w').write(buildVersion + "\n") -import Tardis - root = os.environ.setdefault('VIRTUAL_ENV', '') version = Tardis.__version__ -add_pkgs = Tardis.__check_features() +add_pkgs = Tardis.check_features() setup( name = 'Tardis-Backup', version = version, @@ -47,27 +48,27 @@ ( root + '/etc/logwatch/scripts/services', [ 'logwatch/scripts/services/tardisd' ]), ], entry_points = { - 'console_scripts' : [ - 'tardis = Tardis.Client:main', - 'tardisd = Tardis.Daemon:main', - 'tardisfs = Tardis.TardisFS:main', - 'regenerate = Tardis.Regenerate:main', - 'lstardis = Tardis.List:main', - 'sonic = Tardis.Sonic:main', - 'tardiff = Tardis.Diff:main', - 'tardisremote = Tardis.HttpInterface:tornado', - ], + 'console_scripts' : [ + 'tardis = Tardis.Client:main', + 'tardisd = Tardis.Daemon:main', + 'tardisfs = Tardis.TardisFS:main', + 'regenerate = Tardis.Regenerate:main', + 'lstardis = Tardis.List:main', + 'sonic = Tardis.Sonic:main', + 'tardiff = Tardis.Diff:main', + 'tardisremote = Tardis.HttpInterface:tornado', + ], }, classifiers = [ - 'License :: OSI Approved :: BSD License', - 'Development Status :: 4 - Beta', - 'Intended Audience :: Developers', - 'Intended Audience :: System Administrators', - 'Topic :: System :: Archiving :: Backup', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2.7' - 'Operating System :: MacOS :: MacOS X', - 'Operating System :: POSIX', - 'Operating System :: POSIX :: Linux', + 'License :: OSI Approved :: 
BSD License', + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'Topic :: System :: Archiving :: Backup', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2.7' + 'Operating System :: MacOS :: MacOS X', + 'Operating System :: POSIX', + 'Operating System :: POSIX :: Linux', ] - ) + ) diff --git a/tardis b/tardis index 7e39a35..256f6b8 100755 --- a/tardis +++ b/tardis @@ -1,7 +1,7 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -from Tardis import Client import sys +from Tardis import Client sys.exit(Client.main()) diff --git a/tardisd b/tardisd index 389378d..583c845 100755 --- a/tardisd +++ b/tardisd @@ -1,7 +1,7 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -from Tardis import Daemon import sys +from Tardis import Daemon sys.exit(Daemon.main()) diff --git a/tardisfs b/tardisfs index 34028b9..bdf0bbf 100755 --- a/tardisfs +++ b/tardisfs @@ -1,7 +1,7 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -from Tardis import TardisFS import sys +from Tardis import TardisFS sys.exit(TardisFS.main()) diff --git a/tardisremote b/tardisremote index 3a4ffc0..a84ddc4 100755 --- a/tardisremote +++ b/tardisremote @@ -1,7 +1,7 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -from Tardis import HttpInterface import sys +from Tardis import HttpInterface sys.exit(HttpInterface.tornado())