pyflakes linting
- Some pyflakes linting done: removed unused imports and unused variables
- Added validate_settings to webapp

Modified:
skyline/boundary/boundary.py
skyline/boundary/boundary_alerters.py
skyline/boundary/boundary_algorithms.py
skyline/crucible/crucible.py
skyline/crucible/crucible_algorithms.py
skyline/panorama/panorama.py
skyline/webapp/backend.py
skyline/webapp/gunicorn.py
skyline/webapp/webapp.py
earthgecko committed Aug 14, 2016
1 parent 888f81f commit f09e6c7
Showing 9 changed files with 95 additions and 82 deletions.
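The class of issue pyflakes reports here is easy to reproduce. The snippet below is purely illustrative (it is not from this commit; the module and names are invented) and shows the warning types this commit acts on: unused imports and locals that are assigned but never used. pyflakes also catches undefined names, the same class of bug as the sent_alert/send_alert fix in boundary.py below.

    # pyflakes_demo.py - invented example of the issues pyflakes flags
    import os          # pyflakes: 'os' imported but unused
    import socket      # pyflakes: 'socket' imported but unused
    from time import time


    def check_metric(datapoint, threshold):
        # pyflakes: local variable 'check_failed' is assigned to but never used
        check_failed = False
        return (int(time()), datapoint > threshold)

Running pyflakes against a file prints one warning per line, which is the list this commit works through file by file.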
8 changes: 3 additions & 5 deletions skyline/boundary/boundary.py
@@ -8,13 +8,11 @@
 from threading import Thread
 from collections import defaultdict
 from multiprocessing import Process, Manager, Queue
-from msgpack import Unpacker, unpackb, packb
-from os import path, kill, getpid, system, listdir
-from os.path import dirname, join, abspath, isfile
+from msgpack import Unpacker, packb
+from os import path, kill, getpid
 from math import ceil
 import traceback
-import operator
-import socket
 import re
 import os
 import errno
@@ -787,7 +785,7 @@ def run(self):
 send_alert = True
 if ENABLE_BOUNDARY_DEBUG:
 logger.info("debug :: alerts_sent set to %s" % str(alerts_sent))
-logger.info("debug :: send_alert set to %s" % str(sent_alert))
+logger.info("debug :: send_alert set to %s" % str(send_alert))
 else:
 if ENABLE_BOUNDARY_DEBUG:
 logger.info("debug :: redis alerter key retrieved, unpacking" + str(alerter_sent_count_key))
18 changes: 14 additions & 4 deletions skyline/boundary/boundary_alerters.py
@@ -1,3 +1,4 @@
+import logging
 from smtplib import SMTP
 import boundary_alerters
 try:
@@ -24,6 +25,11 @@
 sys.path.insert(0, os.path.dirname(__file__))
 import settings

+skyline_app = 'boundary'
+skyline_app_logger = '%sLog' % skyline_app
+logger = logging.getLogger(skyline_app_logger)
+skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
+
 """
 Create any alerter you want here. The function is invoked from trigger_alert.
 4 arguments will be passed in as strings:
@@ -135,7 +141,7 @@ def alert_pagerduty(datapoint, metric_name, expiration_time, metric_trigger, algorithm):
 pager = pygerduty.PagerDuty(settings.BOUNDARY_PAGERDUTY_OPTS['subdomain'], settings.BOUNDARY_PAGERDUTY_OPTS['auth_token'])
 pager.trigger_incident(settings.BOUNDARY_PAGERDUTY_OPTS['key'], 'Anomalous metric: %s (value: %s) - %s' % (metric_name, datapoint, algorithm))
 else:
-pagerduty_not_enabled = True
+return False


 def alert_hipchat(datapoint, metric_name, expiration_time, metric_trigger, algorithm):
@@ -192,7 +198,7 @@ def alert_hipchat(datapoint, metric_name, expiration_time, metric_trigger, algorithm):
 for room in rooms:
 hipster.method('rooms/message', method='POST', parameters={'room_id': room, 'from': 'skyline', 'color': settings.BOUNDARY_HIPCHAT_OPTS['color'], 'message': '%s - Boundary - %s - Anomalous metric: %s (value: %s) at %s hours %s' % (sender, algorithm, metric_name, datapoint, graphite_previous_hours, embed_graph)})
 else:
-hipchat_not_enabled = True
+return False


 def alert_syslog(datapoint, metric_name, expiration_time, metric_trigger, algorithm):
@@ -211,7 +217,7 @@ def alert_syslog(datapoint, metric_name, expiration_time, metric_trigger, algorithm):
 syslog.openlog(syslog_ident, syslog.LOG_PID, syslog.LOG_LOCAL4)
 syslog.syslog(4, message)
 else:
-syslog_not_enabled = True
+return False


 def trigger_alert(alerter, datapoint, metric_name, expiration_time, metric_trigger, algorithm):
@@ -221,4 +227,8 @@ def trigger_alert(alerter, datapoint, metric_name, expiration_time, metric_trigger, algorithm):
 else:
 strategy = 'alert_%s' % alerter

-getattr(boundary_alerters, strategy)(datapoint, metric_name, expiration_time, metric_trigger, algorithm)
+try:
+    getattr(boundary_alerters, strategy)(datapoint, metric_name, expiration_time, metric_trigger, algorithm)
+except:
+    logger.error('error :: alerters - %s - getattr error' % strategy)
+    logger.info(traceback.format_exc())
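For context, trigger_alert resolves an alerter function from its string name, and this commit wraps that dispatch so a broken alerter is logged rather than raised. A minimal self-contained sketch of the pattern follows; the names are demo stand-ins, not the module's real alerters (the real code dispatches via getattr on boundary_alerters):

    import logging
    import sys
    import traceback

    logging.basicConfig()
    logger = logging.getLogger('demoLog')


    def alert_smtp(datapoint, metric_name):
        print('SMTP alert for %s (value: %s)' % (metric_name, datapoint))


    def trigger_alert(alerter, datapoint, metric_name):
        strategy = 'alert_%s' % alerter
        try:
            # resolve the function from its string name on this module
            getattr(sys.modules[__name__], strategy)(datapoint, metric_name)
        except Exception:
            # a missing or failing alerter no longer crashes the caller
            logger.error('error :: alerters - %s - getattr error' % strategy)
            logger.info(traceback.format_exc())


    trigger_alert('smtp', 1.0, 'demo.metric')    # dispatches to alert_smtp
    trigger_alert('nosuch', 1.0, 'demo.metric')  # logged, not raised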
7 changes: 0 additions & 7 deletions skyline/boundary/boundary_algorithms.py
@@ -1,12 +1,8 @@
 import pandas
 import numpy as np
-import scipy
-import statsmodels.api as sm
 import traceback
 import logging
-import re
 from time import time
-from msgpack import unpackb, packb
 from redis import StrictRedis

 import sys
@@ -15,7 +11,6 @@
 sys.path.insert(0, os.path.dirname(__file__))

 from settings import (
-    FULL_DURATION,
     MAX_TOLERABLE_BOREDOM,
     MIN_TOLERABLE_LENGTH,
     STALE_PERIOD,
@@ -314,14 +309,12 @@ def run_selected_algorithm(
 logger.info('debug :: auto aggregating %s for %s' % (metric_name, algorithm))
 try:
 agg_timeseries = autoaggregate_ts(timeseries, autoaggregate_value)
-aggregatation_failed = False
 if ENABLE_BOUNDARY_DEBUG:
 logger.info(
 'debug :: aggregated_timeseries returned %s for %s' % (
 metric_name, algorithm))
 except Exception as e:
 agg_timeseries = []
-aggregatation_failed = True
 if ENABLE_BOUNDARY_DEBUG:
 logger.info('debug error - autoaggregate excpection %s for %s' % (metric_name, algorithm))
 logger.error('Algorithm error: %s' % traceback.format_exc())
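The two aggregatation_failed flags were removed because nothing read them: the empty agg_timeseries fallback already signals failure. A hedged sketch of the resulting guard pattern, with a stand-in aggregator (not the real autoaggregate_ts):

    def autoaggregate_ts(timeseries, autoaggregate_value):
        # stand-in: sum consecutive windows of autoaggregate_value points,
        # keyed by the last timestamp in each window
        return [
            (window[-1][0], sum(value for _, value in window))
            for window in (
                timeseries[i:i + autoaggregate_value]
                for i in range(0, len(timeseries), autoaggregate_value))
        ]


    def aggregate_or_empty(timeseries, autoaggregate_value):
        try:
            return autoaggregate_ts(timeseries, autoaggregate_value)
        except Exception:
            return []  # no failure flag needed - callers test for emptiness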
23 changes: 10 additions & 13 deletions skyline/crucible/crucible.py
@@ -7,18 +7,16 @@
 import time
 from time import time, sleep
 from threading import Thread
-from multiprocessing import Process, Manager, Queue
-from msgpack import Unpacker, unpackb, packb
+from multiprocessing import Process, Manager
+from msgpack import packb
 import os
-from os.path import dirname, join, abspath, isfile
-from os import path, kill, getpid, system, getcwd, listdir, makedirs
+from os.path import join, isfile
+from os import kill, getpid, listdir
 from sys import exit, version_info
 import traceback
-import re
-import socket
 import json
 import gzip
 import sys
 import requests
 try:
 import urlparse
@@ -30,7 +28,6 @@
 import urllib.request
 import urllib.error
 import errno
-import imp
 import datetime
 import shutil

@@ -39,7 +36,7 @@
 # sys.path.insert(0, os.path.dirname(__file__))

 import settings
-from skyline_functions import mkdir_p, load_metric_vars, fail_check
+from skyline_functions import load_metric_vars, fail_check

 from crucible_algorithms import run_algorithms

@@ -357,7 +354,7 @@ def spin_process(self, i, run_timestamp, metric_check_file):
 try:
 last_check = self.redis_conn.get(cache_key)
 except Exception as e:
-logger.error('error :: could not query cache_key for %s - %s - %s' % (alerter, metric, e))
+logger.error('error :: could not query cache_key for %s - %s - %s' % (source_app, metric, e))
 logger.info('all anomaly files will be removed')
 remove_all_anomaly_files = True

@@ -366,7 +363,7 @@
 self.redis_conn.setex(cache_key, expiration_timeout, packb(value))
 logger.info('set cache_key for %s - %s with timeout of %s' % (source_app, metric, str(expiration_timeout)))
 except Exception as e:
-logger.error('error :: could not query cache_key for %s - %s - %s' % (alerter, metric, e))
+logger.error('error :: could not query cache_key for %s - %s - %s' % (source_app, metric, e))
 logger.info('all anomaly files will be removed')
 remove_all_anomaly_files = True
 else:
@@ -661,8 +658,6 @@ def spin_process(self, i, run_timestamp, metric_check_file):
 # Run crucible algorithms
 logger.info('running crucible tests - %s' % (metric))

-timeseries_dir = metric.replace('.', '/')
-
 if os.path.isfile(anomaly_json_gz):
 if not os.path.isfile(anomaly_json):
 if settings.ENABLE_CRUCIBLE_DEBUG:
@@ -797,9 +792,11 @@ def spin_process(self, i, run_timestamp, metric_check_file):
 os.system('%s %s' % (str(run_script), str(crucible_anomaly_file)))

 # Remove metric check file
+nothing_to_do = ''
+
 try:
 os.remove(metric_check_file)
-logger.info('complete removed check file - %s' % (metric_check_file))
+logger.info('complete removed check file - %s %s' % (metric_check_file, nothing_to_do))
 except OSError:
 pass

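The two alerter to source_app fixes above correct a latent NameError in the exception paths of spin_process's Redis de-duplication. A rough standalone sketch of that check-then-set pattern, with invented key names and connection details:

    import logging
    from time import time

    from msgpack import packb
    from redis import StrictRedis

    logging.basicConfig()
    logger = logging.getLogger('demoLog')
    redis_conn = StrictRedis(unix_socket_path='/tmp/redis.sock')

    source_app = 'crucible'
    metric = 'demo.metric'
    cache_key = 'crucible.last_check.%s.%s' % (source_app, metric)
    expiration_timeout = 1800

    try:
        last_check = redis_conn.get(cache_key)
    except Exception as e:
        logger.error('error :: could not query cache_key for %s - %s - %s' % (source_app, metric, e))
        last_check = None

    if not last_check:
        try:
            # setex expires the key automatically, so the same metric is
            # not re-checked within the expiration_timeout window
            redis_conn.setex(cache_key, expiration_timeout, packb(int(time())))
        except Exception as e:
            logger.error('error :: could not set cache_key for %s - %s - %s' % (source_app, metric, e))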
36 changes: 20 additions & 16 deletions skyline/crucible/crucible_algorithms.py
@@ -7,11 +7,9 @@
 import logging
 import os
 import time
-from multiprocessing import Process
 from sys import version_info

-from os.path import dirname, join, abspath
-from os import makedirs
+from os.path import join

 import sys
 import os.path
@@ -20,12 +18,14 @@

 from settings import (
     ALGORITHMS,
-    ENABLE_CRUCIBLE_DEBUG,
     MIRAGE_ALGORITHMS,
     PANDAS_VERSION,
 )

-logger = logging.getLogger("crucibleLog")
+skyline_app = 'crucible'
+skyline_app_logger = '%sLog' % skyline_app
+logger = logging.getLogger(skyline_app_logger)
+
 python_version = int(version_info[0])

 """
@@ -133,8 +133,8 @@ def first_hour_average(timeseries, end_timestamp, full_duration):
 # last_hour_threshold = int_end_timestamp - (int_full_duration - 3600)
 int_second_last_end_timestamp = int(timeseries[-2][0])
 resolution = int_end_timestamp - int_second_last_end_timestamp
-ten_data_point_seconds = resolution * 10
-ten_datapoints_ago = int_end_timestamp - ten_data_point_seconds
+# @modified 20160814 - pyflaked
+# ten_data_point_seconds = resolution * 10
 sixty_data_point_seconds = resolution * 60
 sixty_datapoints_ago = int_end_timestamp - sixty_data_point_seconds
 last_hour_threshold = int_end_timestamp - (int_full_duration - sixty_datapoints_ago)
@@ -209,10 +209,11 @@ def mean_subtraction_cumulation(timeseries, end_timestamp, full_duration):
 series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])
 series = series - series[0:len(series) - 1].mean()
 stdDev = series[0:len(series) - 1].std()
-if PANDAS_VERSION < '0.18.0':
-    expAverage = pandas.stats.moments.ewma(series, com=15)
-else:
-    expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=15).mean()
+# @modified 20160814 - pyflaked
+# if PANDAS_VERSION < '0.18.0':
+#     expAverage = pandas.stats.moments.ewma(series, com=15)
+# else:
+#     expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=15).mean()

 if PANDAS_VERSION < '0.17.0':
 return abs(series.iget(-1)) > 3 * stdDev
@@ -234,8 +235,9 @@ def least_squares(timeseries, end_timestamp, full_duration):
 x = np.array([t[0] for t in timeseries])
 y = np.array([t[1] for t in timeseries])
 A = np.vstack([x, np.ones(len(x))]).T
-results = np.linalg.lstsq(A, y)
-residual = results[1]
+# @modified 20160814 - pyflaked
+# results = np.linalg.lstsq(A, y)
+# residual = results[1]
 m, c = np.linalg.lstsq(A, y)[0]
 errors = []
 # Evaluate append once, not every time in the loop - this gains ~0.020 s on
@@ -306,8 +308,9 @@ def ks_test(timeseries, end_timestamp, full_duration):
 try:
 int_end_timestamp = int(timeseries[-1][0])

-hour_ago = int_end_timestamp - 3600
-ten_minutes_ago = int_end_timestamp - 600
+# @modified 20160814 - pyflaked
+# hour_ago = int_end_timestamp - 3600
+# ten_minutes_ago = int_end_timestamp - 600
 # Determine resolution of the data set
 # reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])
 # probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])
@@ -485,7 +488,8 @@ def run_algorithms(
 logger.info('info :: error thrown in algorithm running and plotting - %s' % (str(algorithm)))

 end_analysis = int(time.time())
-seconds_to_run = end_analysis - start_analysis
+# @modified 20160814 - pyflaked
+# seconds_to_run = end_analysis - start_analysis
 # logger.info(
 #     'analysis of %s at a full duration of %s took %s seconds' %
 #     (timeseries_name, str(full_duration), str(seconds_to_run)))
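After the lint, least_squares keeps only the fitted slope and intercept from np.linalg.lstsq; the residual it never used is gone. A self-contained sketch of the surviving fit-and-measure-errors idea, using toy data and a simplified stand-in for the algorithm's real decision threshold:

    import numpy as np

    # toy timeseries of (timestamp, value) pairs
    timeseries = [(t, float(t % 420)) for t in range(0, 3600, 60)]

    x = np.array([t[0] for t in timeseries])
    y = np.array([t[1] for t in timeseries])
    A = np.vstack([x, np.ones(len(x))]).T
    # only the solution is kept; the residual pyflakes flagged was unused
    m, c = np.linalg.lstsq(A, y)[0]

    # deviations of each point from the fitted line
    errors = [value - (m * timestamp + c) for timestamp, value in timeseries]
    # simplified stand-in check: is the latest deviation an outlier?
    anomalous = abs(errors[-1]) > 3 * np.std(errors)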
52 changes: 32 additions & 20 deletions skyline/panorama/panorama.py
@@ -6,33 +6,18 @@
 from redis import StrictRedis
 from time import time, sleep
 from threading import Thread
-from collections import defaultdict
-from multiprocessing import Process, Manager, Queue
-from msgpack import Unpacker, unpackb, packb
+from multiprocessing import Process, Manager
+from msgpack import Unpacker, packb
 import os
-from os import path, kill, getpid, system, listdir
+from os import kill, getpid, listdir
 from os.path import join, isfile
-from math import ceil
 import traceback
-import operator
-import socket
-import re
-import imp
-import shutil
 from sys import version_info
 import mysql.connector
 from mysql.connector import errorcode

 import settings
-from skyline_functions import send_graphite_metric, mkdir_p, load_metric_vars, fail_check
-
-# Converting one settings variable into a local variable, just because it is a
-# long string otherwise.
-try:
-    ENABLE_PANORAMA_DEBUG = settings.ENABLE_PANORAMA_DEBUG
-except:
-    logger.error('error :: cannot determine ENABLE_PANORAMA_DEBUG from settings' % skyline_app)
-    ENABLE_PANORAMA_DEBUG = False
+from skyline_functions import load_metric_vars, fail_check

 skyline_app = 'panorama'
 skyline_app_logger = '%sLog' % skyline_app
@@ -45,6 +30,14 @@

 this_host = str(os.uname()[1])

+# Converting one settings variable into a local variable, just because it is a
+# long string otherwise.
+try:
+    ENABLE_PANORAMA_DEBUG = settings.ENABLE_PANORAMA_DEBUG
+except:
+    logger.error('error :: cannot determine ENABLE_PANORAMA_DEBUG from settings' % skyline_app)
+    ENABLE_PANORAMA_DEBUG = False
+
 try:
     SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
     if SERVER_METRIC_PATH == '.':
@@ -496,6 +489,7 @@ def determine_id(table, key, value):
 query_cache_key, str(determined_id)))
 except Exception as e:
 logger.error(traceback.format_exc())
+logger.error('%s' % str(e))
 logger.error('error :: failed to set query_cache_key - %s - id: %s' % (
 query_cache_key, str(determined_id)))
 return determined_id
@@ -819,7 +813,6 @@ def run(self):
 spawned_pids = []
 pid_count = 0
 now = time()
-run_timestamp = int(now)
 for i in range(1, settings.PANORAMA_PROCESSES + 1):
 try:
 p = Process(target=self.spin_process, args=(i, metric_check_file))
@@ -857,4 +850,23 @@ def run(self):
 for p in pids:
 p.terminate()
 p.join()

+check_file_name = os.path.basename(str(metric_check_file))
+if settings.ENABLE_PANORAMA_DEBUG:
+    logger.info('debug :: check_file_name - %s' % check_file_name)
+check_file_timestamp = check_file_name.split('.', 1)[0]
+if settings.ENABLE_PANORAMA_DEBUG:
+    logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
+check_file_metricname_txt = check_file_name.split('.', 1)[1]
+if settings.ENABLE_PANORAMA_DEBUG:
+    logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
+check_file_metricname = check_file_metricname_txt.replace('.txt', '')
+if settings.ENABLE_PANORAMA_DEBUG:
+    logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
+check_file_metricname_dir = check_file_metricname.replace('.', '/')
+if settings.ENABLE_PANORAMA_DEBUG:
+    logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
+
+metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
+
+fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
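The block added to run() above parses a check file named <timestamp>.<metric.name>.txt and derives the directory the failed check is filed under. A dry run of just that parsing, with invented paths:

    import os

    # invented example paths
    failed_checks_dir = '/opt/skyline/panorama/failed_checks'
    metric_check_file = '/opt/skyline/panorama/check/1471190400.stats.demo.metric.txt'

    check_file_name = os.path.basename(str(metric_check_file))
    # -> '1471190400.stats.demo.metric.txt'
    check_file_timestamp = check_file_name.split('.', 1)[0]
    # -> '1471190400'
    check_file_metricname_txt = check_file_name.split('.', 1)[1]
    # -> 'stats.demo.metric.txt'
    check_file_metricname = check_file_metricname_txt.replace('.txt', '')
    # -> 'stats.demo.metric'
    check_file_metricname_dir = check_file_metricname.replace('.', '/')
    # -> 'stats/demo/metric'

    metric_failed_check_dir = '%s/%s/%s' % (
        failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
    # -> '/opt/skyline/panorama/failed_checks/stats/demo/metric/1471190400'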
9 changes: 3 additions & 6 deletions skyline/webapp/backend.py
@@ -1,15 +1,12 @@
 import logging
 import traceback
-import sys
 import re
 from os import path
-import string
-import operator
 from time import time, sleep

-from flask import Flask, request, render_template, redirect
-import mysql.connector
-from mysql.connector import errorcode
+from flask import request
+# import mysql.connector
+# from mysql.connector import errorcode

 import settings
 from skyline_functions import get_graphite_metric, mysql_select
