From c7f71ff70bdcc68efc2c88e736a6c39443e33ab6 Mon Sep 17 00:00:00 2001 From: Fausto Marzi Date: Wed, 27 May 2015 13:59:01 +0100 Subject: [PATCH] Consistent opt args and variable to identify that same opt arg The freezerc was using an inconsistent naming convention between option arguments and variable to the same opt argument i.e.: --path-to-backup -> (backup_opt_dict.src_file) Change-Id: I6e70e60797742054e0e5e10b7b2398bc42354e3b Implements: blueprint consistent-args --- freezer/arguments.py | 20 +++++------ freezer/backup.py | 17 ++++----- freezer/job.py | 6 ++-- freezer/lvm.py | 5 +-- freezer/swift.py | 6 ++-- freezer/tar.py | 20 +++++------ freezer/utils.py | 60 +++++++++++++++---------------- tests/commons.py | 24 ++++++------- tests/scenario/backup_scenario.py | 10 +++--- tests/test_backup.py | 10 +++--- tests/test_job.py | 4 +-- tests/test_swift.py | 4 +-- tests/test_tar.py | 4 +-- tests/test_utils.py | 6 ++-- 14 files changed, 99 insertions(+), 97 deletions(-) diff --git a/freezer/arguments.py b/freezer/arguments.py index b30b1a5e..e92e27f3 100644 --- a/freezer/arguments.py +++ b/freezer/arguments.py @@ -78,7 +78,7 @@ def backup_arguments(args_dict={}): arg_parser.add_argument( '-F', '--path-to-backup', '--file-to-backup', action='store', help="The file or directory you want to back up to Swift", - dest='src_file', default=False) + dest='path_to_backup', default=False) arg_parser.add_argument( '-N', '--backup-name', action='store', help="The backup name you want to use to identify your backup \ @@ -96,7 +96,7 @@ def backup_arguments(args_dict={}): arg_parser.add_argument( '-L', '--list-containers', action='store_true', help='''List the Swift containers on remote Object Storage Server''', - dest='list_container', default=False) + dest='list_containers', default=False) arg_parser.add_argument( '-l', '--list-objects', action='store_true', help='''List the Swift objects stored in a container on remote Object\ @@ -104,7 +104,7 @@ def 
backup_arguments(args_dict={}): arg_parser.add_argument( '-o', '--get-object', action='store', help="The Object name you want to download on the local file system.", - dest='object', default=False) + dest='get_object', default=False) arg_parser.add_argument( '-d', '--dst-file', action='store', help="The file name used to save the object on your local disk and\ @@ -143,7 +143,7 @@ def backup_arguments(args_dict={}): help="Set the backup level used with tar to implement incremental \ backup. If a level 1 is specified but no level 0 is already \ available, a level 0 will be done and subsequently backs to level 1.\ - Default 0 (No Incremental)", dest='max_backup_level', + Default 0 (No Incremental)", dest='max_level', type=int, default=False) arg_parser.add_argument( '--always-level', action='store', help="Set backup\ @@ -152,14 +152,14 @@ def backup_arguments(args_dict={}): level 3 and to that point always a backup level 3 will be executed. \ It will not restart from level 0. This option has precedence over \ --max-backup-level. Default False (Disabled)", - dest='always_backup_level', type=int, default=False) + dest='always_level', type=int, default=False) arg_parser.add_argument( '--restart-always-level', action='store', help="Restart the backup \ from level 0 after n days. Valid only if --always-level option \ if set. 
If --always-level is used together with --remove-older-then, \ there might be the chance where the initial level 0 will be removed \ Default False (Disabled)", - dest='restart_always_backup', type=float, default=False) + dest='restart_always_level', type=float, default=False) arg_parser.add_argument( '-R', '--remove-older-then', '--remove-older-than', action='store', help=('Checks in the specified container for object older than the ' @@ -199,7 +199,7 @@ def backup_arguments(args_dict={}): user = password = port = ''', - dest='mysql_conf_file', default=False) + dest='mysql_conf', default=False) if is_windows(): arg_parser.add_argument( '--log-file', action='store', @@ -238,7 +238,7 @@ def backup_arguments(args_dict={}): '-M', '--max-segment-size', action='store', help="Set the maximum file chunk size in bytes to upload to swift\ Default 67108864 bytes (64MB)", - dest='max_seg_size', type=int, default=67108864) + dest='max_segment_size', type=int, default=67108864) arg_parser.add_argument( '--restore-abs-path', action='store', help=('Set the absolute path where you want your data restored. ' @@ -279,7 +279,7 @@ def backup_arguments(args_dict={}): '--os-auth-ver', choices=['1', '2', '3'], action='store', help='Swift auth version, could be 1, 2 or 3', - dest='auth_version', default=2) + dest='os_auth_ver', default=2) arg_parser.add_argument( '--proxy', action='store', help='''Enforce proxy that alters system HTTP_PROXY and HTTPS_PROXY, @@ -314,7 +314,7 @@ def backup_arguments(args_dict={}): the sql server instance. 
Following is an example of config file: instance = ''', - dest='sql_server_config', default=False) + dest='sql_server_conf', default=False) arg_parser.add_argument( '--volume', action='store', help='Create a snapshot of the selected volume', diff --git a/freezer/backup.py b/freezer/backup.py index 32af9fc7..c72debac 100644 --- a/freezer/backup.py +++ b/freezer/backup.py @@ -53,7 +53,7 @@ def backup_mode_sql_server(backup_opt_dict, time_stamp, manifest_meta_dict): as the backup finish the db will be unlocked and the backup will be uploaded. A sql_server.conf_file is required for this operation. """ - with open(backup_opt_dict.sql_server_config, 'r') as sql_conf_file_fd: + with open(backup_opt_dict.sql_server_conf, 'r') as sql_conf_file_fd: for line in sql_conf_file_fd: if 'instance' in line: db_instance = line.split('=')[1].strip() @@ -76,7 +76,7 @@ def backup_mode_mysql(backup_opt_dict, time_stamp, manifest_meta_dict): the db tables will be flushed and locked for read, then the lvm create command will be executed and after that, the table will be unlocked and the backup will be executed. It is important to have the available in - backup_args.mysql_conf_file the file where the database host, name, user, + backup_args.mysql_conf the file where the database host, name, user, password and port are set. """ @@ -85,14 +85,14 @@ def backup_mode_mysql(backup_opt_dict, time_stamp, manifest_meta_dict): except ImportError: raise ImportError('Please install PyMySQL module') - if not backup_opt_dict.mysql_conf_file: + if not backup_opt_dict.mysql_conf: raise ValueError('MySQL: please provide a valid config file') - # Open the file provided in backup_args.mysql_conf_file and extract the + # Open the file provided in backup_args.mysql_conf and extract the # db host, name, user, password and port. 
db_user = db_host = db_pass = False # Use the default mysql port if not provided db_port = 3306 - with open(backup_opt_dict.mysql_conf_file, 'r') as mysql_file_fd: + with open(backup_opt_dict.mysql_conf, 'r') as mysql_file_fd: for line in mysql_file_fd: if 'host' in line: db_host = line.split('=')[1].strip() @@ -223,9 +223,10 @@ def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict): tar_backup_queue = multiprocessing.Queue(maxsize=2) if is_windows(): - backup_opt_dict.absolute_path = backup_opt_dict.src_file - backup_opt_dict.src_file = use_shadow(backup_opt_dict.src_file, - backup_opt_dict.volume) + backup_opt_dict.absolute_path = backup_opt_dict.path_to_backup + backup_opt_dict.path_to_backup = use_shadow( + backup_opt_dict.path_to_backup, + backup_opt_dict.volume) # Execute a tar gzip of the specified directory and return # small chunks (default 128MB), timestamp, backup, filename, diff --git a/freezer/job.py b/freezer/job.py index 3cfdba78..a8a9f04b 100644 --- a/freezer/job.py +++ b/freezer/job.py @@ -58,7 +58,7 @@ def wrapper(self): class InfoJob(Job): @Job.executemethod def execute(self): - if self.conf.list_container: + if self.conf.list_containers: swift.show_containers(self.conf) elif self.conf.list_objects: containers = swift.check_container_existance(self.conf) @@ -85,8 +85,8 @@ def execute(self): swift.create_containers(self.conf) if self.conf.no_incremental: - if self.conf.max_backup_level or \ - self.conf.always_backup_level: + if self.conf.max_level or \ + self.conf.always_level: raise Exception( 'no-incremental option is not compatible ' 'with backup level options') diff --git a/freezer/lvm.py b/freezer/lvm.py index 8be63e2a..c18acb69 100644 --- a/freezer/lvm.py +++ b/freezer/lvm.py @@ -202,11 +202,12 @@ def lvm_snap(backup_opt_dict): def get_lvm_info(backup_opt_dict): """ - Take a file system path as argument as backup_opt_dict.src_file + Take a file system path as argument as backup_opt_dict.path_to_backup and return a dictionary 
containing dictionary['lvm_srcvol'] and dictionary['lvm_volgroup'] where the path is mounted on. - :param backup_opt_dict: backup_opt_dict.src_file, the file system path + :param backup_opt_dict: backup_opt_dict.path_to_backup, the file system + path :returns: the dictionary backup_opt_dict containing keys lvm_srcvol and lvm_volgroup with respective values """ diff --git a/freezer/swift.py b/freezer/swift.py index 61ac3918..513a5c22 100644 --- a/freezer/swift.py +++ b/freezer/swift.py @@ -65,7 +65,7 @@ def show_containers(backup_opt_dict): Print remote containers in sorted order """ - if not backup_opt_dict.list_container: + if not backup_opt_dict.list_containers: return False ordered_container = {} @@ -312,7 +312,7 @@ def get_client(backup_opt_dict): user=options.user_name, key=options.password, tenant_name=options.tenant_name, os_options=options.os_options, - auth_version=backup_opt_dict.auth_version, + auth_version=backup_opt_dict.os_auth_ver, insecure=backup_opt_dict.insecure, retries=6) if backup_opt_dict.dry_run: @@ -423,7 +423,7 @@ def add_object( break package_name = u'{0}/{1}/{2}/{3}'.format( package_name, time_stamp, - backup_opt_dict.max_seg_size, file_chunk_index) + backup_opt_dict.max_segment_size, file_chunk_index) add_chunk(backup_opt_dict, package_name, file_chunk) diff --git a/freezer/tar.py b/freezer/tar.py index d0e8f555..9d98ff2e 100644 --- a/freezer/tar.py +++ b/freezer/tar.py @@ -161,20 +161,20 @@ def gen_tar_command( required_list = [ opt_dict.backup_name, - opt_dict.src_file, - os.path.exists(opt_dict.src_file)] + opt_dict.path_to_backup, + os.path.exists(opt_dict.path_to_backup)] if not validate_all_args(required_list): raise Exception('Error: Please ALL the following options: ' '--path-to-backup, --backup-name') - # Change che current working directory to op_dict.src_file - os.chdir(os.path.normpath(opt_dict.src_file.strip())) + # Change che current working directory to op_dict.path_to_backup + 
os.chdir(os.path.normpath(opt_dict.path_to_backup.strip())) logging.info('[*] Changing current working directory to: {0} \ - '.format(opt_dict.src_file)) + '.format(opt_dict.path_to_backup)) logging.info('[*] Backup started for: {0} \ - '.format(opt_dict.src_file)) + '.format(opt_dict.path_to_backup)) # Tar option for default behavior. Please refer to man tar to have # a better options explanation @@ -232,7 +232,7 @@ def tar_backup(opt_dict, tar_command, backup_queue): tar_chunk = b'' logging.info( '[*] Archiving and compressing files from {0}'.format( - opt_dict.src_file)) + opt_dict.path_to_backup)) tar_process = subprocess.Popen( tar_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, @@ -242,14 +242,14 @@ def tar_backup(opt_dict, tar_command, backup_queue): for file_block in tar_process.stdout: tar_chunk += file_block file_read_limit += len(file_block) - if file_read_limit >= opt_dict.max_seg_size: + if file_read_limit >= opt_dict.max_segment_size: backup_queue.put( ({("%08d" % file_chunk_index): tar_chunk})) file_chunk_index += 1 tar_chunk = b'' file_read_limit = 0 - # Upload segments smaller then opt_dict.max_seg_size - if len(tar_chunk) < opt_dict.max_seg_size: + # Upload segments smaller then opt_dict.max_segment_size + if len(tar_chunk) < opt_dict.max_segment_size: backup_queue.put( ({("%08d" % file_chunk_index): tar_chunk})) diff --git a/freezer/utils.py b/freezer/utils.py index d8b34da7..22ac9c33 100644 --- a/freezer/utils.py +++ b/freezer/utils.py @@ -82,7 +82,7 @@ def gen_manifest_meta( manifest_meta_dict['x-object-meta-backup-name'] = \ backup_opt_dict.backup_name manifest_meta_dict['x-object-meta-src-file-to-backup'] = \ - backup_opt_dict.src_file + backup_opt_dict.path_to_backup manifest_meta_dict['x-object-meta-abs-file-path'] = '' # Set manifest meta if encrypt_pass_file is provided @@ -92,17 +92,17 @@ def gen_manifest_meta( if backup_opt_dict.encrypt_pass_file is False: manifest_meta_dict['x-object-meta-encrypt-data'] = '' 
manifest_meta_dict['x-object-meta-always-backup-level'] = '' - if backup_opt_dict.always_backup_level: + if backup_opt_dict.always_level: manifest_meta_dict['x-object-meta-always-backup-level'] = \ - backup_opt_dict.always_backup_level + backup_opt_dict.always_level - # Set manifest meta if max_backup_level argument is provided - # Once the incremental backup arrive to max_backup_level, it will + # Set manifest meta if max_level argument is provided + # Once the incremental backup arrive to max_level, it will # restart from level 0 manifest_meta_dict['x-object-meta-maximum-backup-level'] = '' - if backup_opt_dict.max_backup_level is not False: + if backup_opt_dict.max_level is not False: manifest_meta_dict['x-object-meta-maximum-backup-level'] = \ - str(backup_opt_dict.max_backup_level) + str(backup_opt_dict.max_level) # At the end of the execution, checks the objects ages for the # specified swift container. If there are object older then the @@ -114,7 +114,7 @@ def gen_manifest_meta( = '{0}'.format(backup_opt_dict.remove_older_than) manifest_meta_dict['x-object-meta-hostname'] = backup_opt_dict.hostname manifest_meta_dict['x-object-meta-segments-size-bytes'] = \ - str(backup_opt_dict.max_seg_size) + str(backup_opt_dict.max_segment_size) manifest_meta_dict['x-object-meta-backup-created-timestamp'] = \ str(backup_opt_dict.time_stamp) manifest_meta_dict['x-object-meta-providers-list'] = 'HP' @@ -130,14 +130,14 @@ def gen_manifest_meta( manifest_meta_dict['x-object-meta-container-segments'] = \ backup_opt_dict.container_segments - # Set the restart_always_backup value to n days. According - # to the following option, when the always_backup_level is set + # Set the restart_always_level value to n days. 
According + # to the following option, when the always_level is set # the backup will be reset to level 0 if the current backup # times tamp is older then the days in x-object-meta-container-segments manifest_meta_dict['x-object-meta-restart-always-backup'] = '' - if backup_opt_dict.restart_always_backup is not False: + if backup_opt_dict.restart_always_level is not False: manifest_meta_dict['x-object-meta-restart-always-backup'] = \ - backup_opt_dict.restart_always_backup + backup_opt_dict.restart_always_level return ( backup_opt_dict, manifest_meta_dict, @@ -354,11 +354,11 @@ def get_abs_oldest_backup(backup_opt_dict): def eval_restart_backup(backup_opt_dict): ''' - Restart backup level if the first backup execute with always_backup_level - is older then restart_always_backup + Restart backup level if the first backup execute with always_level + is older then restart_always_level ''' - if not backup_opt_dict.restart_always_backup: + if not backup_opt_dict.restart_always_level: logging.info('[*] No need to set Backup {0} to level 0.'.format( backup_opt_dict.backup_name)) return False @@ -366,7 +366,7 @@ def eval_restart_backup(backup_opt_dict): logging.info('[*] Checking always backup level timestamp...') # Compute the amount of seconds to be compared with # the remote backup timestamp - max_time = int(float(backup_opt_dict.restart_always_backup) * 86400) + max_time = int(float(backup_opt_dict.restart_always_level) * 86400) current_timestamp = backup_opt_dict.time_stamp backup_name = backup_opt_dict.backup_name hostname = backup_opt_dict.hostname @@ -390,7 +390,7 @@ def eval_restart_backup(backup_opt_dict): if (current_timestamp - first_backup_ts) > max_time: logging.info( '[*] Backup {0} older then {1} days. 
Backup level set to 0'.format( - backup_name, backup_opt_dict.restart_always_backup)) + backup_name, backup_opt_dict.restart_always_level)) return True else: @@ -438,32 +438,32 @@ def set_backup_level(backup_opt_dict, manifest_meta_dict): if manifest_meta_dict.get('x-object-meta-backup-name'): backup_opt_dict.curr_backup_level = int( manifest_meta_dict.get('x-object-meta-backup-current-level')) - max_backup_level = manifest_meta_dict.get( + max_level = manifest_meta_dict.get( 'x-object-meta-maximum-backup-level') - always_backup_level = manifest_meta_dict.get( + always_level = manifest_meta_dict.get( 'x-object-meta-always-backup-level') - restart_always_backup = manifest_meta_dict.get( + restart_always_level = manifest_meta_dict.get( 'x-object-meta-restart-always-backup') - if max_backup_level: - max_backup_level = int(max_backup_level) - if backup_opt_dict.curr_backup_level < max_backup_level: + if max_level: + max_level = int(max_level) + if backup_opt_dict.curr_backup_level < max_level: backup_opt_dict.curr_backup_level += 1 manifest_meta_dict['x-object-meta-backup-current-level'] = \ str(backup_opt_dict.curr_backup_level) else: manifest_meta_dict['x-object-meta-backup-current-level'] = \ backup_opt_dict.curr_backup_level = '0' - elif always_backup_level: - always_backup_level = int(always_backup_level) - if backup_opt_dict.curr_backup_level < always_backup_level: + elif always_level: + always_level = int(always_level) + if backup_opt_dict.curr_backup_level < always_level: backup_opt_dict.curr_backup_level += 1 manifest_meta_dict['x-object-meta-backup-current-level'] = \ str(backup_opt_dict.curr_backup_level) - # If restart_always_backup is set, the backup_age will be computed - # and if the backup age in days is >= restart_always_backup, then + # If restart_always_level is set, the backup_age will be computed + # and if the backup age in days is >= restart_always_level, then # backup-current-level will be set to 0 - if restart_always_backup: - 
backup_opt_dict.restart_always_backup = restart_always_backup + if restart_always_level: + backup_opt_dict.restart_always_level = restart_always_level if eval_restart_backup(backup_opt_dict): backup_opt_dict.curr_backup_level = '0' manifest_meta_dict['x-object-meta-backup-current-level'] \ diff --git a/tests/commons.py b/tests/commons.py index 4a4db520..98e55a7e 100644 --- a/tests/commons.py +++ b/tests/commons.py @@ -637,7 +637,7 @@ def __init__(self): return None class Connection: - def __init__(self, key=True, os_options=True, auth_version=True, user=True, authurl=True, tenant_name=True, retries=True, insecure=True): + def __init__(self, key=True, os_options=True, os_auth_ver=True, user=True, authurl=True, tenant_name=True, retries=True, insecure=True): return None def put_object(self, opt1=True, opt2=True, opt3=True, opt4=True, opt5=True, headers=True, content_length=True, content_type=True): @@ -699,7 +699,7 @@ def __init__(self): fakeclient = FakeSwiftClient() fakeconnector = fakeclient.client() fakeswclient = fakeconnector.Connection() - self.mysql_conf_file = '/tmp/freezer-test-conf-file' + self.mysql_conf = '/tmp/freezer-test-conf-file' self.mysql_db_inst = FakeMySQLdb() self.lvm_auto_snap = '/dev/null' self.lvm_volgroup = 'testgroup' @@ -717,28 +717,28 @@ def __init__(self): self.backup_name = 'test-backup-name' self.hostname = 'test-hostname' self.curr_backup_level = 0 - self.src_file = '/tmp' + self.path_to_backup = '/tmp' self.tar_path = 'true' self.dereference_symlink = 'true' self.no_incremental = 'true' self.exclude = 'true' self.encrypt_pass_file = 'true' self.openssl_path = 'true' - self.always_backup_level = '0' - self.max_backup_level = '0' + self.always_level = '0' + self.max_level = '0' self.remove_older_than = '0' - self.max_seg_size = '0' + self.max_segment_size = '0' self.time_stamp = 123456789 self.container_segments = 'test-container-segments' self.container = 'test-container' self.workdir = '/tmp' self.upload = 'true' self.sw_connector = 
fakeswclient - self.max_backup_level = '20' + self.max_level = '20' self.encrypt_pass_file = '/dev/random' - self.always_backup_level = '20' + self.always_level = '20' self.remove_from_date = '2014-12-03T23:23:23' - self.restart_always_backup = 100000 + self.restart_always_level = 100000 self.remote_match_backup = [ 'test-hostname_test-backup-name_1234567_0', 'test-hostname_test-backup-name_1234568_1', @@ -768,13 +768,13 @@ def __init__(self): self.containers_list = [ {'name' : 'testcontainer1', 'bytes' : 123423, 'count' : 10} ] - self.list_container = False + self.list_containers = False self.list_objects = False self.restore_from_date = '2014-12-03T23:23:23' self.restore_from_host = 'test-hostname' self.action = 'info' self.insecure = True - self.auth_version = 2 + self.os_auth_ver = 2 self.dry_run = False self.upload_limit = -1 self.download_limit = -1 @@ -999,7 +999,7 @@ def fake_get_containers_list1(self, backup_opt): return backup_opt def fake_get_containers_list2(self, backup_opt): - backup_opt.list_container = None + backup_opt.list_containers = None backup_opt.list_objects = None return backup_opt diff --git a/tests/scenario/backup_scenario.py b/tests/scenario/backup_scenario.py index 6b88a538..f53d6cc5 100644 --- a/tests/scenario/backup_scenario.py +++ b/tests/scenario/backup_scenario.py @@ -189,7 +189,7 @@ def test_no_lvm_level0(self): backup_args = { #'proxy' : '', 'action' : 'backup', - 'src_file' : copy(self.tmp_path), + 'path_to_backup' : copy(self.tmp_path), 'backup_name' : str(uuid.uuid4()), 'container' : str(uuid.uuid4()) } @@ -249,7 +249,7 @@ def test_lvm_level0(self): 'lvm_volgroup' : 'freezer-test1-volgroup', 'lvm_snapsize' : '1M', 'exclude' : '*.lock', - 'src_file' : copy(self.tmp_path), + 'path_to_backup' : copy(self.tmp_path), 'backup_name' : str(uuid.uuid4()), 'container' : str(uuid.uuid4()) } @@ -328,7 +328,7 @@ def test_bandwith_limit(self): # Freezer CLI for backup argument dictionary backup_args = { 'action' : 'backup', - 'src_file' : 
copy(self.tmp_path), + 'path_to_backup' : copy(self.tmp_path), 'backup_name' : str(uuid.uuid4()), 'container' : str(uuid.uuid4()), 'upload_limit' : speed_limit_bytes @@ -391,10 +391,10 @@ def test_lvm_incremental_level5(self): 'lvm_dirmount' : '/tmp/freezer-test-lvm-snapshot', 'lvm_volgroup' : 'freezer-test1-volgroup', 'lvm_snapsize' : '1M', - 'src_file' : copy(self.tmp_path), + 'path_to_backup' : copy(self.tmp_path), 'backup_name' : str(uuid.uuid4()), 'container' : str(uuid.uuid4()), - 'max_backup_level' : max_level + 'max_level' : max_level } fdict_before = [] # print '' diff --git a/tests/test_backup.py b/tests/test_backup.py index e90318e2..69863db3 100644 --- a/tests/test_backup.py +++ b/tests/test_backup.py @@ -59,13 +59,13 @@ def test_backup_mode_mysql(self, monkeypatch): monkeypatch.setattr(os.path, 'exists', expanduser.exists) monkeypatch.setattr(swiftclient, 'client', fakeswiftclient.client) - mysql_conf_file = backup_opt.mysql_conf_file - backup_opt.__dict__['mysql_conf_file'] = None + mysql_conf = backup_opt.mysql_conf + backup_opt.__dict__['mysql_conf'] = None pytest.raises(Exception, backup_mode_mysql, backup_opt, 123456789, test_meta) # Generate mysql conf test file - backup_opt.__dict__['mysql_conf_file'] = mysql_conf_file - with open(backup_opt.mysql_conf_file, 'w') as mysql_conf_fd: + backup_opt.__dict__['mysql_conf'] = mysql_conf + with open(backup_opt.mysql_conf, 'w') as mysql_conf_fd: mysql_conf_fd.write('host=abcd\nport=1234\nuser=abcd\npassword=abcd\n') assert backup_mode_mysql( backup_opt, 123456789, test_meta) is None @@ -73,7 +73,7 @@ def test_backup_mode_mysql(self, monkeypatch): fakemysql2 = FakeMySQLdb2() monkeypatch.setattr(MySQLdb, 'connect', fakemysql2.connect) pytest.raises(Exception, backup_mode_mysql, backup_opt, 123456789, test_meta) - os.unlink(backup_opt.mysql_conf_file) + os.unlink(backup_opt.mysql_conf) def test_backup_mode_fs(self, monkeypatch): diff --git a/tests/test_job.py b/tests/test_job.py index 8c96070c..1bf65d2b 
100644 --- a/tests/test_job.py +++ b/tests/test_job.py @@ -59,10 +59,10 @@ def test_execute_nothing_to_do(self, monkeypatch): job = InfoJob(backup_opt) assert job.execute() is False - def test_execute_list_container(self, monkeypatch): + def test_execute_list_containers(self, monkeypatch): self.do_monkeypatch(monkeypatch) backup_opt = BackupOpt1() - backup_opt.list_container = True + backup_opt.list_containers = True job = InfoJob(backup_opt) assert job.execute() is True diff --git a/tests/test_swift.py b/tests/test_swift.py index a880f4f1..a39b3c4a 100644 --- a/tests/test_swift.py +++ b/tests/test_swift.py @@ -58,10 +58,10 @@ def test_show_containers(self, monkeypatch): monkeypatch.setattr(logging, 'exception', fakelogging.exception) monkeypatch.setattr(logging, 'error', fakelogging.error) - backup_opt.__dict__['list_container'] = True + backup_opt.__dict__['list_containers'] = True assert show_containers(backup_opt) is True - backup_opt.__dict__['list_container'] = False + backup_opt.__dict__['list_containers'] = False assert show_containers(backup_opt) is False def test_show_objects(self, monkeypatch): diff --git a/tests/test_tar.py b/tests/test_tar.py index cea5188d..f08728d1 100644 --- a/tests/test_tar.py +++ b/tests/test_tar.py @@ -130,7 +130,7 @@ def test_gen_tar_command(self, monkeypatch): assert val2 is not False assert val3 is not False - backup_opt.__dict__['src_file'] = '' + backup_opt.__dict__['path_to_backup'] = '' pytest.raises(Exception, gen_tar_command, backup_opt, meta_data_backup_file, time_stamp, remote_manifest_meta) @@ -154,7 +154,7 @@ def test_tar_backup(self, monkeypatch): monkeypatch.setattr(logging, 'exception', fakelogging.exception) monkeypatch.setattr(logging, 'error', fakelogging.error) - backup_opt.__dict__['max_seg_size'] = 1 + backup_opt.__dict__['max_segment_size'] = 1 assert tar_backup(backup_opt, 'tar_command', fakebackup_queue) is not False def test_tar_restore_args_valid(self, monkeypatch): diff --git a/tests/test_utils.py 
b/tests/test_utils.py index d847239d..ebae43bf 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -148,7 +148,7 @@ def test_eval_restart_backup(self, monkeypatch): backup_opt = BackupOpt1() assert eval_restart_backup(backup_opt) is False - backup_opt.__dict__['restart_always_backup'] = None + backup_opt.__dict__['restart_always_level'] = None assert eval_restart_backup(backup_opt) is False backup_opt = BackupOpt1() @@ -205,8 +205,8 @@ def test_set_backup_level(self): manifest_meta = dict() backup_opt = BackupOpt1() - backup_opt.__dict__['max_backup_level'] = False - backup_opt.__dict__['always_backup_level'] = False + backup_opt.__dict__['max_level'] = False + backup_opt.__dict__['always_level'] = False (backup_opt, manifest_meta) = set_backup_level( backup_opt, manifest_meta) assert manifest_meta['x-object-meta-backup-current-level'] == '0'