Refactor master (#81)
Stylistic cleanup. @pheanex
PHeanEX authored and Oleksii Kliukin committed Mar 22, 2017
1 parent 21fb917 commit d64ca0f
Showing 12 changed files with 83 additions and 78 deletions.
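The diff below is almost entirely mechanical: nested conditionals are collapsed, redundant parentheses around conditional expressions and tuple returns are dropped, methods that never touch self become @staticmethod, dead assignments are removed, and dicts assembled key by key are rewritten as literals. It also carries one genuine fix in main(): the fallback to console output used a comparison (output_method == OUTPUT_METHOD.console) where an assignment was intended. A condensed sketch of the recurring patterns follows, using illustrative names and constants rather than code copied verbatim from pg_view:

USER_HZ = 100  # illustrative constant, stands in for StatCollector.USER_HZ


class Example:
    @staticmethod  # methods that never use `self` become static methods
    def ticks_to_seconds(tick_value_str):
        # redundant parentheses around the conditional expression are dropped
        return float(tick_value_str) / USER_HZ if tick_value_str is not None else None

    @staticmethod
    def int_lower_than_non_zero(val, bound):
        # `val > 0 and val < bound` becomes a chained comparison
        return 0 < val < bound


def bail_out_needed(use_curses, poll_keys_fn):
    # two nested `if` statements collapse into one combined condition
    return use_curses and not poll_keys_fn()


def kb_pretty_print_long(b):
    # augmented assignment (`b %= n`) replaces `b = b % n`
    byte_map = (('TB', 1073741824), ('GB', 1048576), ('MB', 1024))
    parts = []
    for label, n in byte_map:
        d = b // n
        if d:
            parts.append(str(d) + label)
        b %= n
    return ' '.join(parts)


# dicts built one key at a time become a single literal
ncurses_custom_fields = {'header': True, 'prefix': None}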
20 changes: 8 additions & 12 deletions pg_view/__init__.py
@@ -114,7 +114,6 @@ def poll_keys(screen, output):
def do_loop(screen, groups, output_method, collectors, consumer):
""" Display output (or pass it through to ncurses) """

output = None
if output_method == OUTPUT_METHOD.curses:
if screen is None:
logger.error('No parent screen is passed to the curses application')
@@ -131,17 +130,15 @@ def do_loop(screen, groups, output_method, collectors, consumer):
# process input:
consumer.consume()
for st in collectors:
if output_method == OUTPUT_METHOD.curses:
if not poll_keys(screen, output):
# bail out immediately
return
if output_method == OUTPUT_METHOD.curses and not poll_keys(screen, output):
# bail out immediately
return
st.set_units_display(flags.display_units)
st.set_ignore_autohide(not flags.autohide_fields)
st.set_notrim(flags.notrim)
process_single_collector(st)
if output_method == OUTPUT_METHOD.curses:
if not poll_keys(screen, output):
return
if output_method == OUTPUT_METHOD.curses and not poll_keys(screen, output):
return

if output_method == OUTPUT_METHOD.curses:
process_groups(groups)
@@ -182,7 +179,7 @@ def main():

if output_method == OUTPUT_METHOD.curses and not curses_available:
print('Curses output is selected, but curses are unavailable, falling back to console output')
output_method == OUTPUT_METHOD.console
output_method = OUTPUT_METHOD.console

# set basic logging
setup_logger(options)
@@ -192,7 +189,7 @@ def main():
clusters = []

# now try to read the configuration file
config = (read_configuration(options.config_file) if options.config_file else None)
config = read_configuration(options.config_file) if options.config_file else None
if config:
for instance in config:
if user_dbname and instance != user_dbname:
@@ -207,7 +204,6 @@ def main():
logger.error('failed to acquire details about ' +
'the database cluster {0}, the server will be skipped'.format(instance))
elif options.host:
port = options.port or "5432"
# try to connect to the database specified by command-line options
conn = build_connection(options.host, options.port, options.username, options.dbname)
instance = options.instance or "default"
@@ -286,7 +282,7 @@ def main():


def setup_logger(options):
logger.setLevel((logging.INFO if options.verbose else logging.ERROR))
logger.setLevel(logging.INFO if options.verbose else logging.ERROR)
if options.log_file:
LOG_FILE_NAME = options.log_file
# truncate the former logs
42 changes: 22 additions & 20 deletions pg_view/collectors/base_collector.py
@@ -101,23 +101,23 @@ def validate_list_out(l):

@staticmethod
def ticks_to_seconds(tick_value_str):
return (float(tick_value_str) / StatCollector.USER_HZ if tick_value_str is not None else None)
return float(tick_value_str) / StatCollector.USER_HZ if tick_value_str is not None else None

@staticmethod
def bytes_to_mbytes(bytes_val):
return (float(bytes_val) / 1048576 if bytes_val is not None else None)
return float(bytes_val) / 1048576 if bytes_val is not None else None

@staticmethod
def sectors_to_mbytes(sectors):
return (float(sectors) / 2048 if sectors is not None else None)
return float(sectors) / 2048 if sectors is not None else None

@staticmethod
def kb_to_mbytes(kb):
return (float(kb) / 1024 if kb is not None else None)
return float(kb) / 1024 if kb is not None else None

@staticmethod
def time_diff_to_percent(timediff_val):
return (float(timediff_val) * 100 if timediff_val is not None else None)
return float(timediff_val) * 100 if timediff_val is not None else None

@staticmethod
def format_date_from_epoch(epoch_val):
@@ -139,15 +139,14 @@ def kb_pretty_print_long(b):
d = b / n
if d:
r.append(str(d) + l)
b = b % n
b %= n
return ' '.join(r)

@staticmethod
def kb_pretty_print(b):
""" Show memory size as a float value in the biggest measurement units """

r = []
v = 0
for l, n in StatCollector.BYTE_MAP:
if b > n:
v = round(float(b) / n, 1)
@@ -160,10 +159,10 @@ def kb_pretty_print(b):

@staticmethod
def time_interval_pretty_print(start_time, is_delta):
'''Returns a human readable string that shows a time between now and the timestamp passed as an argument.
"""Returns a human readable string that shows a time between now and the timestamp passed as an argument.
The passed argument can be a timestamp (returned by time.time() call) a datetime object or a timedelta object.
In case it is a timedelta object, then it is formatted only
'''
"""

if isinstance(start_time, Number):
if is_delta:
@@ -222,7 +221,7 @@ def sectors_pretty_print(b):

@staticmethod
def int_lower_than_non_zero(row, col, val, bound):
return val > 0 and val < bound
return 0 < val < bound

@staticmethod
def time_field_to_seconds(val):
@@ -391,7 +390,8 @@ def _produce_output_row(self, row):
result[attname] = val
return result

def _produce_output_value(self, row, col, method=OUTPUT_METHOD.console):
@staticmethod
def _produce_output_value(row, col, method=OUTPUT_METHOD.console):
# get the input value
if 'in' in col:
val = row.get(col['in'], None)
@@ -414,7 +414,8 @@ def _produce_output_name(self, col):
attname += ' ' + col['units']
return attname

def _calculate_output_status(self, row, col, val, method):
@staticmethod
def _calculate_output_status(row, col, val, method):
""" Examine the current status indicators and produce the status
value for the specific column of the given row
"""
@@ -555,7 +556,8 @@ def _transform_dict(self, l, custom_transformation_data=None):
return result
raise Exception('No data for the dict transformation supplied')

def _transform_string(self, d):
@staticmethod
def _transform_string(d):
raise Exception('transformation of input type string is not implemented')

def _output_template_for_console(self):
Expand Down Expand Up @@ -652,7 +654,8 @@ def _calculate_statuses_for_row(self, row, method):
statuses.append(self._calculate_output_status(row, col, row[num], method))
return statuses

def _calculate_column_types(self, rows):
@staticmethod
def _calculate_column_types(rows):
result = {}
if len(rows) > 0:
colnames = rows[0].keys()
@@ -728,12 +731,11 @@ def ncurses_output(self, rows, before_string=None, after_string=None):

types_row = self._calculate_column_types(values_rows)

result = {}
result['rows'] = result_rows
result['statuses'] = status_rows
result['hide'] = self._get_columns_to_hide(result_rows, status_rows)
result['highlights'] = dict(zip(result_header, self._get_highlights()))
result['types'] = types_row
result = {'rows': result_rows,
'statuses': status_rows,
'hide': self._get_columns_to_hide(result_rows, status_rows),
'highlights': dict(zip(result_header, self._get_highlights())),
'types': types_row}
for x in StatCollector.NCURSES_CUSTOM_OUTPUT_FIELDS:
result[x] = self.ncurses_custom_fields.get(x, None)
for k in StatCollector.NCURSES_DEFAULTS.keys():
10 changes: 6 additions & 4 deletions pg_view/collectors/host_collector.py
@@ -92,15 +92,17 @@ def _load_avg_state(self, row, col):
state[no] = COLSTATUS.cs_ok
return state

def _concat_load_avg(self, colname, row, optional):
@staticmethod
def _concat_load_avg(colname, row, optional):
""" concat all load averages into a single string """

if len(row) >= 3:
return ' '.join(str(x) for x in row[:3])
else:
return ''

def _load_avg_status(self, row, col, val, bound):
@staticmethod
def _load_avg_status(row, col, val, bound):
if val is not None:
loads = str(val).split()
if len(loads) != 3:
@@ -118,10 +120,10 @@ def _read_cpus():
cpus = cpu_count()
except:
logger.error('multiprocessing does not support cpu_count')
pass
return {'cores': cpus}

def _construct_sysname(self, attname, row, optional):
@staticmethod
def _construct_sysname(attname, row, optional):
if len(row) < 3:
return None
return '{0} {1}'.format(row[0], row[2])
7 changes: 4 additions & 3 deletions pg_view/collectors/memory_collector.py
@@ -117,7 +117,8 @@ def refresh(self):
raw_result = self._transform_input(memdata)
self._do_refresh([raw_result])

def _read_memory_data(self):
@staticmethod
def _read_memory_data():
""" Read relevant data from /proc/meminfo. We are interesed in the following fields:
MemTotal, MemFree, Buffers, Cached, Dirty, CommitLimit, Committed_AS
"""
@@ -131,9 +132,9 @@ def _read_memory_data(self):
# if we have units of measurement different from kB - transform the result
if len(vals) == 3 and vals[2] in ('mB', 'gB'):
if vals[2] == 'mB':
val = val + '0' * 3
val += '0' * 3
if vals[2] == 'gB':
val = val + '0' * 6
val += '0' * 6
if len(str(name)) > 1:
result[str(name)[:-1]] = val
else:
23 changes: 14 additions & 9 deletions pg_view/collectors/partition_collector.py
@@ -126,15 +126,16 @@ def __init__(self, dbname, dbversion, work_directory, consumer):
},
{'out': 'path', 'pos': 10},
]
self.ncurses_custom_fields = {'header': True}
self.ncurses_custom_fields['prefix'] = None
self.ncurses_custom_fields = {'header': True,
'prefix': None}
self.postinit()

def ident(self):
return '{0} ({1}/{2})'.format(super(PartitionStatCollector, self).ident(), self.dbname, self.dbver)

def _dereference_dev_name(self, devname):
return (devname.replace('/dev/', '') if devname else None)
@staticmethod
def _dereference_dev_name(devname):
return devname.replace('/dev/', '') if devname else None

def refresh(self):
result = {}
@@ -162,14 +163,18 @@ def refresh(self):

self._do_refresh([result[PartitionStatCollector.DATA_NAME], result[PartitionStatCollector.XLOG_NAME]])

def calculate_time_until_full(self, colname, prev, cur):
@staticmethod
def calculate_time_until_full(colname, prev, cur):
# both should be expressed in common units, guaranteed by BLOCK_SIZE
if cur.get('path_size', 0) > 0 and prev.get('path_size', 0) > 0 and cur.get('space_left', 0) > 0:
if cur['path_size'] < prev['path_size']:
return cur['space_left'] / (prev['path_size'] - cur['path_size'])
if (cur.get('path_size', 0) > 0 and
prev.get('path_size', 0) > 0 and
cur.get('space_left', 0) > 0 and
cur['path_size'] < prev['path_size']):
return cur['space_left'] / (prev['path_size'] - cur['path_size'])
return None

def get_io_data(self, pnames):
@staticmethod
def get_io_data(pnames):
""" Retrieve raw data from /proc/diskstat (transformations are perfromed via io_list_transformation)"""
result = {}
found = 0 # stop if we found records for all partitions
8 changes: 4 additions & 4 deletions pg_view/collectors/pg_collector.py
@@ -208,8 +208,8 @@ def __init__(self, pgcon, reconnect, pid, dbname, dbver, always_track_pids):
},
]

self.ncurses_custom_fields = {'header': True}
self.ncurses_custom_fields['prefix'] = None
self.ncurses_custom_fields = {'header': True,
'prefix': None}

self.postinit()

@@ -275,7 +275,7 @@ def _get_psinfo(cmdline):
pstype = 'backend'
if pstype == 'autovacuum worker':
pstype = 'autovacuum'
return (pstype, action)
return pstype, action

@staticmethod
def _is_auxiliary_process(pstype):
@@ -369,7 +369,7 @@ def _read_proc(self, pid, is_backend, is_active):

# Assume we managed to read the row if we can get its PID
for cat in 'stat', 'io':
result.update(self._transform_input(raw_result.get(cat, ({} if cat == 'io' else []))))
result.update(self._transform_input(raw_result.get(cat, {} if cat == 'io' else [])))
# generated columns
result['cmdline'] = raw_result.get('cmd', None)
if not is_backend:
13 changes: 4 additions & 9 deletions pg_view/models/db_client.py
@@ -43,7 +43,7 @@ def build_connection(host, port, user, database):
def pick_connection_arguments(conn_args, username, dbname):
""" go through all decected connections, picking the first one that actually works """
result = {}
for conn_type in ('unix', 'tcp', 'tcp6'):
for conn_type in 'unix', 'tcp', 'tcp6':
if len(result) > 0:
break
for arg in conn_args.get(conn_type, []):
@@ -65,7 +65,6 @@ def can_connect_with_connection_arguments(host, port, username, dbname):


def detect_with_proc_net(pid):
result = None
inodes = fetch_socket_inodes_for_process(pid)
parser = ProcNetParser()
result = parser.match_socket_inodes(inodes)
@@ -81,7 +80,6 @@ def detect_db_connection_arguments(work_directory, pid, version, username, dbnam
We do this by first extracting useful information from postmaster.pid,
next reading the postgresql.conf if necessary and, at last,
"""
result = {}
conn_args = detect_with_proc_net(pid)
if not conn_args:
# if we failed to detect the arguments via the /proc/net/ readings,
@@ -104,7 +102,6 @@ def establish_user_defined_connection(instance, conn, clusters):
""" connect the database and get all necessary options like pid and work_directory
we use port, host and socket_directory, preferring socket over TCP connections
"""
pgcon = None
# establish a new connection
try:
pgcon = psycopg2.connect(**conn)
@@ -147,7 +144,7 @@ def make_cluster_desc(name, version, workdir, pid, pgcon, conn):
def reconnect():
pgcon = psycopg2.connect(**conn)
pid = read_postmaster_pid(workdir, name)
return (pgcon, pid)
return pgcon, pid

return {
'name': name,
@@ -170,7 +167,6 @@ def get_postmasters_directories():
# make sure the particular pid is accessible to us
if not os.access(f, os.R_OK):
continue
stat_fields = []
try:
with open(f, 'rU') as fp:
stat_fields = fp.read().strip().split()
@@ -280,7 +276,6 @@ def detect_with_postmaster_pid(work_directory, version):
if version is None or version == 9.0:
return None
PID_FILE = '{0}/postmaster.pid'.format(work_directory)
lines = []

# try to access the socket directory
if not os.access(work_directory, os.R_OK | os.X_OK):
@@ -312,12 +307,12 @@ def detect_with_postmaster_pid(work_directory, version):


def get_dbname_from_path(db_path):
'''
"""
>>> get_dbname_from_path('foo')
'foo'
>>> get_dbname_from_path('/pgsql_bar/9.4/data')
'bar'
'''
"""
m = re.search(r'/pgsql_(.*?)(/\d+.\d+)?/data/?', db_path)
if m:
dbname = m.group(1)
