Skip to content

Commit

Permalink
Consistent opt args and variable to identify the same opt arg
Browse files Browse the repository at this point in the history
The freezerc was using an inconsistent naming convention
between option arguments and the variables for the same opt argument

i.e.:
--path-to-backup -> (backup_opt_dict.src_file)

Change-Id: I6e70e60797742054e0e5e10b7b2398bc42354e3b
Implements: blueprint consistent-args
  • Loading branch information
pizzatrader committed May 27, 2015
1 parent 79e5c3c commit c7f71ff
Show file tree
Hide file tree
Showing 14 changed files with 99 additions and 97 deletions.
20 changes: 10 additions & 10 deletions freezer/arguments.py
Expand Up @@ -78,7 +78,7 @@ def backup_arguments(args_dict={}):
arg_parser.add_argument(
'-F', '--path-to-backup', '--file-to-backup', action='store',
help="The file or directory you want to back up to Swift",
dest='src_file', default=False)
dest='path_to_backup', default=False)
arg_parser.add_argument(
'-N', '--backup-name', action='store',
help="The backup name you want to use to identify your backup \
Expand All @@ -96,15 +96,15 @@ def backup_arguments(args_dict={}):
arg_parser.add_argument(
'-L', '--list-containers', action='store_true',
help='''List the Swift containers on remote Object Storage Server''',
dest='list_container', default=False)
dest='list_containers', default=False)
arg_parser.add_argument(
'-l', '--list-objects', action='store_true',
help='''List the Swift objects stored in a container on remote Object\
Storage Server.''', dest='list_objects', default=False)
arg_parser.add_argument(
'-o', '--get-object', action='store',
help="The Object name you want to download on the local file system.",
dest='object', default=False)
dest='get_object', default=False)
arg_parser.add_argument(
'-d', '--dst-file', action='store',
help="The file name used to save the object on your local disk and\
Expand Down Expand Up @@ -143,7 +143,7 @@ def backup_arguments(args_dict={}):
help="Set the backup level used with tar to implement incremental \
backup. If a level 1 is specified but no level 0 is already \
available, a level 0 will be done and subsequently backs to level 1.\
Default 0 (No Incremental)", dest='max_backup_level',
Default 0 (No Incremental)", dest='max_level',
type=int, default=False)
arg_parser.add_argument(
'--always-level', action='store', help="Set backup\
Expand All @@ -152,14 +152,14 @@ def backup_arguments(args_dict={}):
level 3 and to that point always a backup level 3 will be executed. \
It will not restart from level 0. This option has precedence over \
--max-backup-level. Default False (Disabled)",
dest='always_backup_level', type=int, default=False)
dest='always_level', type=int, default=False)
arg_parser.add_argument(
'--restart-always-level', action='store', help="Restart the backup \
from level 0 after n days. Valid only if --always-level option \
if set. If --always-level is used together with --remove-older-then, \
there might be the chance where the initial level 0 will be removed \
Default False (Disabled)",
dest='restart_always_backup', type=float, default=False)
dest='restart_always_level', type=float, default=False)
arg_parser.add_argument(
'-R', '--remove-older-then', '--remove-older-than', action='store',
help=('Checks in the specified container for object older than the '
Expand Down Expand Up @@ -199,7 +199,7 @@ def backup_arguments(args_dict={}):
user = <mysqluser>
password = <mysqlpass>
port = <db-port>''',
dest='mysql_conf_file', default=False)
dest='mysql_conf', default=False)
if is_windows():
arg_parser.add_argument(
'--log-file', action='store',
Expand Down Expand Up @@ -238,7 +238,7 @@ def backup_arguments(args_dict={}):
'-M', '--max-segment-size', action='store',
help="Set the maximum file chunk size in bytes to upload to swift\
Default 67108864 bytes (64MB)",
dest='max_seg_size', type=int, default=67108864)
dest='max_segment_size', type=int, default=67108864)
arg_parser.add_argument(
'--restore-abs-path', action='store',
help=('Set the absolute path where you want your data restored. '
Expand Down Expand Up @@ -279,7 +279,7 @@ def backup_arguments(args_dict={}):
'--os-auth-ver', choices=['1', '2', '3'],
action='store',
help='Swift auth version, could be 1, 2 or 3',
dest='auth_version', default=2)
dest='os_auth_ver', default=2)
arg_parser.add_argument(
'--proxy', action='store',
help='''Enforce proxy that alters system HTTP_PROXY and HTTPS_PROXY,
Expand Down Expand Up @@ -314,7 +314,7 @@ def backup_arguments(args_dict={}):
the sql server instance.
Following is an example of config file:
instance = <db-instance>''',
dest='sql_server_config', default=False)
dest='sql_server_conf', default=False)
arg_parser.add_argument(
'--volume', action='store',
help='Create a snapshot of the selected volume',
Expand Down
17 changes: 9 additions & 8 deletions freezer/backup.py
Expand Up @@ -53,7 +53,7 @@ def backup_mode_sql_server(backup_opt_dict, time_stamp, manifest_meta_dict):
as the backup finish the db will be unlocked and the backup will be
uploaded. A sql_server.conf_file is required for this operation.
"""
with open(backup_opt_dict.sql_server_config, 'r') as sql_conf_file_fd:
with open(backup_opt_dict.sql_server_conf, 'r') as sql_conf_file_fd:
for line in sql_conf_file_fd:
if 'instance' in line:
db_instance = line.split('=')[1].strip()
Expand All @@ -76,7 +76,7 @@ def backup_mode_mysql(backup_opt_dict, time_stamp, manifest_meta_dict):
the db tables will be flushed and locked for read, then the lvm create
command will be executed and after that, the table will be unlocked and
the backup will be executed. It is important to have the available in
backup_args.mysql_conf_file the file where the database host, name, user,
backup_args.mysql_conf the file where the database host, name, user,
password and port are set.
"""

Expand All @@ -85,14 +85,14 @@ def backup_mode_mysql(backup_opt_dict, time_stamp, manifest_meta_dict):
except ImportError:
raise ImportError('Please install PyMySQL module')

if not backup_opt_dict.mysql_conf_file:
if not backup_opt_dict.mysql_conf:
raise ValueError('MySQL: please provide a valid config file')
# Open the file provided in backup_args.mysql_conf_file and extract the
# Open the file provided in backup_args.mysql_conf and extract the
# db host, name, user, password and port.
db_user = db_host = db_pass = False
# Use the default mysql port if not provided
db_port = 3306
with open(backup_opt_dict.mysql_conf_file, 'r') as mysql_file_fd:
with open(backup_opt_dict.mysql_conf, 'r') as mysql_file_fd:
for line in mysql_file_fd:
if 'host' in line:
db_host = line.split('=')[1].strip()
Expand Down Expand Up @@ -223,9 +223,10 @@ def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
tar_backup_queue = multiprocessing.Queue(maxsize=2)

if is_windows():
backup_opt_dict.absolute_path = backup_opt_dict.src_file
backup_opt_dict.src_file = use_shadow(backup_opt_dict.src_file,
backup_opt_dict.volume)
backup_opt_dict.absolute_path = backup_opt_dict.path_to_backup
backup_opt_dict.path_to_backup = use_shadow(
backup_opt_dict.path_to_backup,
backup_opt_dict.volume)

# Execute a tar gzip of the specified directory and return
# small chunks (default 128MB), timestamp, backup, filename,
Expand Down
6 changes: 3 additions & 3 deletions freezer/job.py
Expand Up @@ -58,7 +58,7 @@ def wrapper(self):
class InfoJob(Job):
@Job.executemethod
def execute(self):
if self.conf.list_container:
if self.conf.list_containers:
swift.show_containers(self.conf)
elif self.conf.list_objects:
containers = swift.check_container_existance(self.conf)
Expand All @@ -85,8 +85,8 @@ def execute(self):
swift.create_containers(self.conf)

if self.conf.no_incremental:
if self.conf.max_backup_level or \
self.conf.always_backup_level:
if self.conf.max_level or \
self.conf.always_level:
raise Exception(
'no-incremental option is not compatible '
'with backup level options')
Expand Down
5 changes: 3 additions & 2 deletions freezer/lvm.py
Expand Up @@ -202,11 +202,12 @@ def lvm_snap(backup_opt_dict):

def get_lvm_info(backup_opt_dict):
"""
Take a file system path as argument as backup_opt_dict.src_file
Take a file system path as argument as backup_opt_dict.path_to_backup
and return a dictionary containing dictionary['lvm_srcvol']
and dictionary['lvm_volgroup'] where the path is mounted on.
:param backup_opt_dict: backup_opt_dict.src_file, the file system path
:param backup_opt_dict: backup_opt_dict.path_to_backup, the file system
path
:returns: the dictionary backup_opt_dict containing keys lvm_srcvol
and lvm_volgroup with respective values
"""
Expand Down
6 changes: 3 additions & 3 deletions freezer/swift.py
Expand Up @@ -65,7 +65,7 @@ def show_containers(backup_opt_dict):
Print remote containers in sorted order
"""

if not backup_opt_dict.list_container:
if not backup_opt_dict.list_containers:
return False

ordered_container = {}
Expand Down Expand Up @@ -312,7 +312,7 @@ def get_client(backup_opt_dict):
user=options.user_name, key=options.password,
tenant_name=options.tenant_name,
os_options=options.os_options,
auth_version=backup_opt_dict.auth_version,
auth_version=backup_opt_dict.os_auth_ver,
insecure=backup_opt_dict.insecure, retries=6)

if backup_opt_dict.dry_run:
Expand Down Expand Up @@ -423,7 +423,7 @@ def add_object(
break
package_name = u'{0}/{1}/{2}/{3}'.format(
package_name, time_stamp,
backup_opt_dict.max_seg_size, file_chunk_index)
backup_opt_dict.max_segment_size, file_chunk_index)
add_chunk(backup_opt_dict, package_name, file_chunk)


Expand Down
20 changes: 10 additions & 10 deletions freezer/tar.py
Expand Up @@ -161,20 +161,20 @@ def gen_tar_command(

required_list = [
opt_dict.backup_name,
opt_dict.src_file,
os.path.exists(opt_dict.src_file)]
opt_dict.path_to_backup,
os.path.exists(opt_dict.path_to_backup)]

if not validate_all_args(required_list):
raise Exception('Error: Please ALL the following options: '
'--path-to-backup, --backup-name')

# Change che current working directory to op_dict.src_file
os.chdir(os.path.normpath(opt_dict.src_file.strip()))
# Change che current working directory to op_dict.path_to_backup
os.chdir(os.path.normpath(opt_dict.path_to_backup.strip()))

logging.info('[*] Changing current working directory to: {0} \
'.format(opt_dict.src_file))
'.format(opt_dict.path_to_backup))
logging.info('[*] Backup started for: {0} \
'.format(opt_dict.src_file))
'.format(opt_dict.path_to_backup))

# Tar option for default behavior. Please refer to man tar to have
# a better options explanation
Expand Down Expand Up @@ -232,7 +232,7 @@ def tar_backup(opt_dict, tar_command, backup_queue):
tar_chunk = b''
logging.info(
'[*] Archiving and compressing files from {0}'.format(
opt_dict.src_file))
opt_dict.path_to_backup))

tar_process = subprocess.Popen(
tar_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
Expand All @@ -242,14 +242,14 @@ def tar_backup(opt_dict, tar_command, backup_queue):
for file_block in tar_process.stdout:
tar_chunk += file_block
file_read_limit += len(file_block)
if file_read_limit >= opt_dict.max_seg_size:
if file_read_limit >= opt_dict.max_segment_size:
backup_queue.put(
({("%08d" % file_chunk_index): tar_chunk}))
file_chunk_index += 1
tar_chunk = b''
file_read_limit = 0

# Upload segments smaller then opt_dict.max_seg_size
if len(tar_chunk) < opt_dict.max_seg_size:
# Upload segments smaller then opt_dict.max_segment_size
if len(tar_chunk) < opt_dict.max_segment_size:
backup_queue.put(
({("%08d" % file_chunk_index): tar_chunk}))
60 changes: 30 additions & 30 deletions freezer/utils.py
Expand Up @@ -82,7 +82,7 @@ def gen_manifest_meta(
manifest_meta_dict['x-object-meta-backup-name'] = \
backup_opt_dict.backup_name
manifest_meta_dict['x-object-meta-src-file-to-backup'] = \
backup_opt_dict.src_file
backup_opt_dict.path_to_backup
manifest_meta_dict['x-object-meta-abs-file-path'] = ''

# Set manifest meta if encrypt_pass_file is provided
Expand All @@ -92,17 +92,17 @@ def gen_manifest_meta(
if backup_opt_dict.encrypt_pass_file is False:
manifest_meta_dict['x-object-meta-encrypt-data'] = ''
manifest_meta_dict['x-object-meta-always-backup-level'] = ''
if backup_opt_dict.always_backup_level:
if backup_opt_dict.always_level:
manifest_meta_dict['x-object-meta-always-backup-level'] = \
backup_opt_dict.always_backup_level
backup_opt_dict.always_level

# Set manifest meta if max_backup_level argument is provided
# Once the incremental backup arrive to max_backup_level, it will
# Set manifest meta if max_level argument is provided
# Once the incremental backup arrive to max_level, it will
# restart from level 0
manifest_meta_dict['x-object-meta-maximum-backup-level'] = ''
if backup_opt_dict.max_backup_level is not False:
if backup_opt_dict.max_level is not False:
manifest_meta_dict['x-object-meta-maximum-backup-level'] = \
str(backup_opt_dict.max_backup_level)
str(backup_opt_dict.max_level)

# At the end of the execution, checks the objects ages for the
# specified swift container. If there are object older then the
Expand All @@ -114,7 +114,7 @@ def gen_manifest_meta(
= '{0}'.format(backup_opt_dict.remove_older_than)
manifest_meta_dict['x-object-meta-hostname'] = backup_opt_dict.hostname
manifest_meta_dict['x-object-meta-segments-size-bytes'] = \
str(backup_opt_dict.max_seg_size)
str(backup_opt_dict.max_segment_size)
manifest_meta_dict['x-object-meta-backup-created-timestamp'] = \
str(backup_opt_dict.time_stamp)
manifest_meta_dict['x-object-meta-providers-list'] = 'HP'
Expand All @@ -130,14 +130,14 @@ def gen_manifest_meta(
manifest_meta_dict['x-object-meta-container-segments'] = \
backup_opt_dict.container_segments

# Set the restart_always_backup value to n days. According
# to the following option, when the always_backup_level is set
# Set the restart_always_level value to n days. According
# to the following option, when the always_level is set
# the backup will be reset to level 0 if the current backup
# times tamp is older then the days in x-object-meta-container-segments
manifest_meta_dict['x-object-meta-restart-always-backup'] = ''
if backup_opt_dict.restart_always_backup is not False:
if backup_opt_dict.restart_always_level is not False:
manifest_meta_dict['x-object-meta-restart-always-backup'] = \
backup_opt_dict.restart_always_backup
backup_opt_dict.restart_always_level

return (
backup_opt_dict, manifest_meta_dict,
Expand Down Expand Up @@ -354,19 +354,19 @@ def get_abs_oldest_backup(backup_opt_dict):

def eval_restart_backup(backup_opt_dict):
'''
Restart backup level if the first backup execute with always_backup_level
is older then restart_always_backup
Restart backup level if the first backup execute with always_level
is older then restart_always_level
'''

if not backup_opt_dict.restart_always_backup:
if not backup_opt_dict.restart_always_level:
logging.info('[*] No need to set Backup {0} to level 0.'.format(
backup_opt_dict.backup_name))
return False

logging.info('[*] Checking always backup level timestamp...')
# Compute the amount of seconds to be compared with
# the remote backup timestamp
max_time = int(float(backup_opt_dict.restart_always_backup) * 86400)
max_time = int(float(backup_opt_dict.restart_always_level) * 86400)
current_timestamp = backup_opt_dict.time_stamp
backup_name = backup_opt_dict.backup_name
hostname = backup_opt_dict.hostname
Expand All @@ -390,7 +390,7 @@ def eval_restart_backup(backup_opt_dict):
if (current_timestamp - first_backup_ts) > max_time:
logging.info(
'[*] Backup {0} older then {1} days. Backup level set to 0'.format(
backup_name, backup_opt_dict.restart_always_backup))
backup_name, backup_opt_dict.restart_always_level))

return True
else:
Expand Down Expand Up @@ -438,32 +438,32 @@ def set_backup_level(backup_opt_dict, manifest_meta_dict):
if manifest_meta_dict.get('x-object-meta-backup-name'):
backup_opt_dict.curr_backup_level = int(
manifest_meta_dict.get('x-object-meta-backup-current-level'))
max_backup_level = manifest_meta_dict.get(
max_level = manifest_meta_dict.get(
'x-object-meta-maximum-backup-level')
always_backup_level = manifest_meta_dict.get(
always_level = manifest_meta_dict.get(
'x-object-meta-always-backup-level')
restart_always_backup = manifest_meta_dict.get(
restart_always_level = manifest_meta_dict.get(
'x-object-meta-restart-always-backup')
if max_backup_level:
max_backup_level = int(max_backup_level)
if backup_opt_dict.curr_backup_level < max_backup_level:
if max_level:
max_level = int(max_level)
if backup_opt_dict.curr_backup_level < max_level:
backup_opt_dict.curr_backup_level += 1
manifest_meta_dict['x-object-meta-backup-current-level'] = \
str(backup_opt_dict.curr_backup_level)
else:
manifest_meta_dict['x-object-meta-backup-current-level'] = \
backup_opt_dict.curr_backup_level = '0'
elif always_backup_level:
always_backup_level = int(always_backup_level)
if backup_opt_dict.curr_backup_level < always_backup_level:
elif always_level:
always_level = int(always_level)
if backup_opt_dict.curr_backup_level < always_level:
backup_opt_dict.curr_backup_level += 1
manifest_meta_dict['x-object-meta-backup-current-level'] = \
str(backup_opt_dict.curr_backup_level)
# If restart_always_backup is set, the backup_age will be computed
# and if the backup age in days is >= restart_always_backup, then
# If restart_always_level is set, the backup_age will be computed
# and if the backup age in days is >= restart_always_level, then
# backup-current-level will be set to 0
if restart_always_backup:
backup_opt_dict.restart_always_backup = restart_always_backup
if restart_always_level:
backup_opt_dict.restart_always_level = restart_always_level
if eval_restart_backup(backup_opt_dict):
backup_opt_dict.curr_backup_level = '0'
manifest_meta_dict['x-object-meta-backup-current-level'] \
Expand Down

0 comments on commit c7f71ff

Please sign in to comment.