diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1ae3595 --- /dev/null +++ b/.gitignore @@ -0,0 +1,32 @@ +*.py[co] + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +var +sdist +develop-eggs +.installed.cfg + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.tox + +#Translations +*.mo + +#Mr Developer +.mr.developer.cfg + +# virutalenvs +.venv + +# debug stuff +test.py diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..1333ed7 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1 @@ +TODO diff --git a/README.md b/README.md index b8ece0d..f29f16a 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,99 @@ -stackify-api-python -=================== - Stackify API for Python +======= + +[Stackify](https://stackify.com) support for Python programs. + +```python +import stackify + +logger = stackify.getLogger() + +try: + "Make it so, #" + 1 +except: + logger.exception("Can't add strings and numbers") +``` + +## Installation +stackify-python can be installed through pip: +```bash +$ pip install -U stackify +``` + +You can also check out the repository and install with setuptools: +```bash +$ ./setup.py install +``` + +## Configuration +Your Stackify setup information can be provided via environment variables. For example: +```bash +export STACKIFY_APPLICATION=MyApp +export STACKIFY_ENVIRONMENT=Dev +export STACKIFY_API_KEY=****** +``` + +These options can also be provided in your code: +```python +import stackify + +logger = stackify.getLogger(application="MyApp", environment="Dev", api_key=******) +logger.warning('Something happened') +``` + +## Usage + +stackify-python handles uploads in batches of 100 messages at a time on another thread. +When your program exits, it will shut the thread down and upload the remaining messages. 
+ +Stackify can store extra data along with your log message: +```python +import stackify + +logger = stackify.getLogger() + +try: + user_string = raw_input("Enter a number: ") + print("You entered", int(user_string)) +except ValueError: + logger.exception('Bad input', extra={'user entered': user_string}) +``` + +You can also name your logger instead of using the automatically generated one: +```python +import stackify + +logger = stackify.getLogger('mymodule.myfile') +``` + +## Internal Logger + +This library has an internal logger it uses for debugging and messaging. +For example, if you want to enable debug messages: +```python +import logging + +logging.getLogger('stackify').setLevel(logging.DEBUG) +``` + +By default, it will enable the default logging settings via `logging.basicConfig()` +and print `WARNING` level messages and above. If you wish to set everything up yourself, +just pass `basic_config=False` in `getLogger`: +```python +import stackify + +logger = stackify.getLogger(basic_config=False) +``` + +## Testing +Run the test suite with setuptools: +```bash +$ ./setup.py test +``` + +You can obtain a coverage report with nose: +```bash +$ ./setup nosetests --with-coverage --cover-package=stackify +``` +You might need to install the `nose` and `coverage` packages. 
#!/usr/bin/env python
"""Setuptools packaging script for the stackify library."""
from setuptools import setup
import re
import ast

# Prefer a reStructuredText long_description (PyPI renders RST natively);
# fall back to the raw Markdown when pypandoc is unavailable.
try:
    from pypandoc import convert
    read_md = lambda f: convert(f, 'rst')
except ImportError:
    print('warning: pypandoc module not found, could not convert Markdown to RST')
    read_md = lambda f: open(f).read()

# Pull __version__ out of the package without importing it (importing would
# require the package's runtime dependencies to already be installed).
version_re = re.compile(r'__version__\s+=\s+(.*)')

with open('stackify/__init__.py') as f:
    version = ast.literal_eval(version_re.search(f.read()).group(1))

setup(
    name='stackify',
    version=version,
    author='Matthew Thompson',
    author_email='chameleonator@gmail.com',
    packages=['stackify'],
    url='https://github.com/stackify/stackify-api-python',
    license=open('LICENSE.txt').readline(),
    description='Stackify API for Python',
    long_description=read_md('README.md'),
    download_url='https://github.com/stackify/stackify-api-python/tarball/0.0.1',
    keywords=['logging', 'stackify', 'exception'],
    classifiers=["Programming Language :: Python"],
    install_requires=[
        'retrying>=1.2.3',
        'requests>=2.4.1'
    ],
    test_suite='tests',
    # BUG FIX: this keyword was misspelled "tests_requires".  setuptools
    # silently ignores unknown keywords, so the test dependencies were
    # never actually declared; the correct name is "tests_require".
    tests_require=[
        'mock>=1.0.1',
        'nose==1.3.4'
    ]
)
def getLogger(name=None, auto_shutdown=True, basic_config=True, **kwargs):
    '''Get a logger and attach a StackifyHandler if needed.

    You can pass this function keyword arguments for Stackify configuration.
    If they are omitted you can specify them through environment variables:
      * STACKIFY_API_KEY
      * STACKIFY_APPLICATION
      * STACKIFY_ENVIRONMENT
      * STACKIFY_API_URL

    Args:
        name: The name of the logger (or None to automatically make one)
        auto_shutdown: Register an atexit hook to shut down logging
        basic_config: Set up with logging.basicConfig() for regular logging

    Optional Args:
        api_key: Your Stackify API key
        application: The name of your Stackify application
        environment: The Stackify environment to log to
        api_url: An optional API url if required

    Returns:
        A logger instance with Stackify handler and listener attached.
    '''
    if basic_config:
        logging.basicConfig()

    if not name:
        # two frames up the stack: the module that called getLogger()
        name = getCallerName(2)

    logger = logging.getLogger(name)

    # BUG FIX: the original tested "if not [isinstance(...) for x in
    # logger.handlers]".  A non-empty list is truthy even when every element
    # is False, so a logger that already had any unrelated handler would
    # never get a StackifyHandler attached.  any() performs the intended
    # "is there already a StackifyHandler?" check.
    if not any(isinstance(x, StackifyHandler) for x in logger.handlers):
        internal_logger = logging.getLogger(__name__)
        internal_logger.debug('Creating handler for logger %s', name)
        handler = StackifyHandler(**kwargs)
        logger.addHandler(handler)

        if auto_shutdown:
            # flush remaining queued messages when the process exits
            internal_logger.debug('Registering atexit callback')
            atexit.register(stopLogging, logger)

        if logger.getEffectiveLevel() == logging.NOTSET:
            logger.setLevel(DEFAULT_LEVEL)

        # start the background upload thread for this handler's queue
        handler.listener.start()

    return logger
def arg_or_env(name, args, default=None):
    '''Resolve one configuration value from kwargs or the environment.

    Looks up ``name`` in the ``args`` dict first; if absent (or falsy),
    falls back to the ``STACKIFY_<NAME>`` environment variable, and
    finally to ``default``.

    Args:
        name: configuration key, e.g. 'api_key'
        args: dict of keyword arguments supplied by the caller
        default: value to return when neither source provides one

    Returns:
        The resolved configuration value.

    Raises:
        NameError: if no value is found and no default was supplied.
    '''
    env_name = 'STACKIFY_{0}'.format(name.upper())
    value = args.get(name)
    if not value:
        try:
            value = os.environ[env_name]
        except KeyError:
            # BUG FIX: the original tested "if default:", so a legitimate
            # falsy default (e.g. '' or 0) raised NameError instead of
            # being returned.  Test against None explicitly.
            if default is not None:
                return default
            raise NameError('You must specify the keyword argument {0} or '
                            'environment variable {1}'.format(name, env_name))
    return value
class ErrorItem(JSONObject):
    '''A single exception entry in a Stackify error report.'''

    def __init__(self):
        self.Message = None        # exception message
        self.ErrorType = None      # exception class name
        self.ErrorTypeCode = None
        self.Data = None           # custom data
        self.SourceMethod = None
        self.StackTrace = []       # array of TraceFrames
        self.InnerError = None     # cause?

    def load_stack(self, exc_info=None):
        '''Populate this item from an ``(type, value, traceback)`` triple.

        Args:
            exc_info: an exc_info triple; when omitted, ``sys.exc_info()``
                for the exception currently being handled is used.
        '''
        if not exc_info:
            type_, value, tb = sys.exc_info()
        else:
            type_, value, tb = exc_info

        stacks = traceback.extract_tb(tb)

        self.ErrorType = type_.__name__
        self.Message = str(value)
        # BUG FIX: extract_tb() returns [] when the traceback is None (for
        # example an exception object that was never raised), and the
        # original "stacks[-1][2]" raised IndexError in that case.
        if stacks:
            self.SourceMethod = stacks[-1][2]
        # store frames innermost-first
        for filename, lineno, method, text in reversed(stacks):
            self.StackTrace.append(TraceFrame(filename, lineno, method))
def nonempty(d):
    """Return a copy of *d* with every None-valued key removed."""
    return dict((key, val) for key, val in d.items() if val is not None)


class JSONObject(object):
    """Mixin that serializes an instance's attributes to a JSON string.

    Attributes whose value is None are omitted from the output; nested
    JSONObject instances are serialized the same way via their __dict__.
    """

    def toJSON(self):
        serialize = lambda obj: nonempty(obj.__dict__)
        return json.dumps(self, default=serialize)
class StackifyListener(QueueListener):
    '''
    A listener to read queued log messages and send them to Stackify.
    '''

    def __init__(self, queue_, max_batch=MAX_BATCH, config=None, **kwargs):
        super(StackifyListener, self).__init__(queue_)

        if config is None:
            config = get_configuration(**kwargs)

        self.max_batch = max_batch  # messages per upload batch
        self.messages = []          # pending, not-yet-uploaded messages
        self.http = HTTPClient(config)

    def handle(self, record):
        '''Convert a LogRecord into a LogMsg and upload a batch when full.'''
        # lazily identify the application to Stackify on the first record
        if not self.http.identified:
            logger = logging.getLogger(__name__)
            logger.debug('Identifying application')
            self.http.identify_application()

        msg = LogMsg()
        msg.from_record(record)
        self.messages.append(msg)

        if len(self.messages) >= self.max_batch:
            self.send_group()

    def send_group(self):
        '''Upload pending messages; on failure, log and discard them.'''
        group = LogMsgGroup(self.messages)
        try:
            self.http.send_log_group(group)
        # BUG FIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.  Best-effort semantics are kept
        # but system-exiting exceptions now propagate.
        except Exception:
            logger = logging.getLogger(__name__)
            logger.exception('Could not send %s log messages, discarding',
                             len(self.messages))
        # clear the batch whether or not the upload succeeded
        del self.messages[:]

    def stop(self):
        '''Stop the worker thread and flush any remaining messages.'''
        logger = logging.getLogger(__name__)
        logger.debug('Shutting down listener')
        super(StackifyListener, self).stop()

        # send any remaining messages
        if self.messages:
            logger.debug('%s messages left on shutdown, uploading',
                         len(self.messages))
            self.send_group()
+# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +import logging +import threading +try: + import Queue as queue +except ImportError: + import queue + + +class QueueHandler(logging.Handler): + """ + This handler sends events to a queue. Typically, it would be used together + with a multiprocessing Queue to centralise logging to file in one process + (in a multi-process application), so as to avoid file write contention + between processes. + + This code is new in Python 3.2, but this class can be copy pasted into + user code for use with earlier Python versions. + """ + + def __init__(self, queue): + """ + Initialise an instance, using the passed queue. + """ + logging.Handler.__init__(self) + self.queue = queue + + def enqueue(self, record): + """ + Enqueue a record. + + The base implementation uses put_nowait. You may want to override + this method if you want to use blocking, timeouts or custom queue + implementations. + """ + self.queue.put_nowait(record) + + def prepare(self, record): + """ + Prepares a record for queuing. The object returned by this method is + enqueued. 
+ + The base implementation formats the record to merge the message + and arguments, and removes unpickleable items from the record + in-place. + + You might want to override this method if you want to convert + the record to a dict or JSON string, or send a modified copy + of the record while leaving the original intact. + """ + # The format operation gets traceback text into record.exc_text + # (if there's exception data), and also puts the message into + # record.message. We can then use this to replace the original + # msg + args, as these might be unpickleable. We also zap the + # exc_info attribute, as it's no longer needed and, if not None, + # will typically not be pickleable. + self.format(record) + record.msg = record.message + record.args = None + record.exc_info = None + return record + + def emit(self, record): + """ + Emit a record. + + Writes the LogRecord to the queue, preparing it for pickling first. + """ + try: + self.enqueue(self.prepare(record)) + except Exception: + self.handleError(record) + + +class QueueListener(object): + """ + This class implements an internal threaded listener which watches for + LogRecords being added to a queue, removes them and passes them to a + list of handlers for processing. + """ + _sentinel = None + + def __init__(self, queue, *handlers): + """ + Initialise an instance with the specified queue and + handlers. + """ + self.queue = queue + self.handlers = handlers + self._stop = threading.Event() + self._thread = None + + def dequeue(self, block): + """ + Dequeue a record and return it, optionally blocking. + + The base implementation uses get. You may want to override this method + if you want to use timeouts or work with custom queue implementations. + """ + return self.queue.get(block) + + def start(self): + """ + Start the listener. + + This starts up a background thread to monitor the queue for + LogRecords to process. 
+ """ + self._thread = t = threading.Thread(target=self._monitor) + t.setDaemon(True) + t.start() + + def prepare(self, record): + """ + Prepare a record for handling. + + This method just returns the passed-in record. You may want to + override this method if you need to do any custom marshalling or + manipulation of the record before passing it to the handlers. + """ + return record + + def handle(self, record): + """ + Handle a record. + + This just loops through the handlers offering them the record + to handle. + """ + record = self.prepare(record) + for handler in self.handlers: + handler.handle(record) + + def _monitor(self): + """ + Monitor the queue for records, and ask the handler + to deal with them. + + This method runs on a separate, internal thread. + The thread will terminate if it sees a sentinel object in the queue. + """ + q = self.queue + has_task_done = hasattr(q, 'task_done') + while not self._stop.isSet(): + try: + record = self.dequeue(True) + if record is self._sentinel: + break + self.handle(record) + if has_task_done: + q.task_done() + except queue.Empty: + pass + # There might still be records in the queue. + while True: + try: + record = self.dequeue(False) + if record is self._sentinel: + break + self.handle(record) + if has_task_done: + q.task_done() + except queue.Empty: + break + + def enqueue_sentinel(self): + """ + This is used to enqueue the sentinel record. + + The base implementation uses put_nowait. You may want to override this + method if you want to use timeouts or work with custom queue + implementations. + """ + self.queue.put_nowait(self._sentinel) + + def stop(self): + """ + Stop the listener. + + This asks the thread to terminate, and then waits for it to do so. + Note that if you don't call this before your application exits, there + may be some records still left on the queue, which won't be processed. 
+ """ + self._stop.set() + self.enqueue_sentinel() + self._thread.join() + self._thread = None diff --git a/stackify/http.py b/stackify/http.py new file mode 100644 index 0000000..82b8ca7 --- /dev/null +++ b/stackify/http.py @@ -0,0 +1,92 @@ +import requests +import retrying +import logging +import gzip + +try: + from cStringIO import StringIO +except: + try: + from StringIO import StringIO + except: + pass # python 3, we use a new function in gzip + + +def gzip_compress(data): + if hasattr(gzip, 'compress'): + return gzip.compress(bytes(data, 'utf-8')) # python 3 + else: + s = StringIO() + g = gzip.GzipFile(fileobj=s, mode='w') + g.write(data) + g.close() + return s.getvalue() + + +from stackify.application import EnvironmentDetail +from stackify import READ_TIMEOUT + + +class HTTPClient: + def __init__(self, api_config): + self.api_config = api_config + self.environment_detail = EnvironmentDetail(api_config) + self.app_name_id = None + self.app_env_id = None + self.device_id = None + self.device_app_id = None + self.device_alias = None + self.identified = False + + def POST(self, url, json_object, use_gzip=False): + request_url = self.api_config.api_url + url + logger = logging.getLogger(__name__) + logger.debug('Request URL: %s', request_url) + + headers = { + 'Content-Type': 'application/json', + 'X-Stackify-Key': self.api_config.api_key, + 'X-Stackify-PV': 'V1', + } + + try: + payload_data = json_object.toJSON() + logger.debug('POST data: %s', payload_data) + + if use_gzip: + headers['Content-Encoding'] = 'gzip' + payload_data = gzip_compress(payload_data) + + response = requests.post(request_url, + data=payload_data, + headers=headers, + timeout=READ_TIMEOUT) + logger.debug('Response: %s', response.text) + return response.json() + except requests.exceptions.RequestException: + logger.exception('HTTP exception') + raise + except ValueError as e: + # could not read json response + logger.exception('Cannot decode JSON response') + raise + + 
    # Retry with exponential backoff (1s base) for up to 10 seconds total.
    @retrying.retry(wait_exponential_multiplier=1000, stop_max_delay=10000)
    def identify_application(self):
        """Register this app/device with Stackify and cache the returned IDs.

        Populates app_name_id, app_env_id, device_id, device_app_id and
        device_alias from the IdentifyApp response, then marks the client
        as identified so callers can skip re-identification.
        """
        result = self.POST('/Metrics/IdentifyApp', self.environment_detail)
        self.app_name_id = result.get('AppNameID')
        self.app_env_id = result.get('AppEnvID')
        self.device_id = result.get('DeviceID')
        self.device_app_id = result.get('DeviceAppID')
        self.device_alias = result.get('DeviceAlias')
        self.identified = True

    @retrying.retry(wait_exponential_multiplier=1000, stop_max_delay=10000)
    def send_log_group(self, group):
        """Stamp a LogMsgGroup with the cached identity and upload it.

        Args:
            group: a LogMsgGroup to send to /Log/Save (gzip-compressed,
                per the final True argument to POST).

        NOTE(review): POST passes READ_TIMEOUT (5000) as the requests
        timeout, which is measured in seconds, not milliseconds -- that
        looks like an ~83-minute timeout; confirm the intended unit.
        """
        group.CDID = self.device_id
        group.CDAppID = self.device_app_id
        group.AppNameID = self.app_name_id
        group.ServerName = self.device_alias
        # fall back to the local hostname when no device alias is known
        if not group.ServerName:
            group.ServerName = self.environment_detail.deviceName
        self.POST('/Log/Save', group, True)
class ClearEnvTest(unittest.TestCase):
    '''
    Base TestCase that removes the Stackify configuration environment
    variables for the duration of each test and restores them afterwards.
    '''

    # variables that would interfere with configuration tests if set
    _STACKIFY_VARS = (
        'STACKIFY_APPLICATION',
        'STACKIFY_ENVIRONMENT',
        'STACKIFY_API_KEY',
        'STACKIFY_API_URL',
    )

    def setUp(self):
        # stash any values currently in the environment, then remove them
        self.saved = {}
        for name in self._STACKIFY_VARS:
            if name in os.environ:
                self.saved[name] = os.environ.pop(name)

    def tearDown(self):
        # put back whatever setUp removed
        for name, value in self.saved.items():
            os.environ[name] = value
        del self.saved
+ with self.assertRaises(NameError): + get_configuration(application='1') + with self.assertRaises(NameError): + get_configuration(application='1', environment='2') + with self.assertRaises(NameError): + get_configuration(application='1', environment='2', api_url='3') + + get_configuration(application='1', environment='2', api_key='3') + + def test_environment_config(self): + '''API configuration can load from env vars''' + env_map = { + 'STACKIFY_APPLICATION': 'test1_appname', + 'STACKIFY_ENVIRONMENT': 'test1_environment', + 'STACKIFY_API_KEY': 'test1_apikey', + 'STACKIFY_API_URL': 'test1_apiurl', + } + + with patch.dict('os.environ', env_map): + config = get_configuration() + + self.assertEqual(config.application, 'test1_appname') + self.assertEqual(config.environment, 'test1_environment') + self.assertEqual(config.api_key, 'test1_apikey') + self.assertEqual(config.api_url, 'test1_apiurl') + + def test_kwarg_mix(self): + '''API configuration can load from a mix of env vars and kwargs''' + env_map = { + 'STACKIFY_APPLICATION': 'test2_appname', + 'STACKIFY_ENVIRONMENT': 'test2_environment', + } + + with patch.dict('os.environ', env_map): + config = get_configuration(api_key='test2_apikey', api_url='test2_apiurl') + + self.assertEqual(config.application, 'test2_appname') + self.assertEqual(config.environment, 'test2_environment') + self.assertEqual(config.api_key, 'test2_apikey') + self.assertEqual(config.api_url, 'test2_apiurl') + + def test_kwargs(self): + '''API configuration can load from kwargs''' + config = get_configuration( + application = 'test3_appname', + environment = 'test3_environment', + api_key = 'test3_apikey', + api_url = 'test3_apiurl') + + self.assertEqual(config.application, 'test3_appname') + self.assertEqual(config.environment, 'test3_environment') + self.assertEqual(config.api_key, 'test3_apikey') + self.assertEqual(config.api_url, 'test3_apiurl') + + def test_api_url_default(self): + '''API URL is set automatically''' + config = 
class TestJSONObject(unittest.TestCase):
    '''
    Test the JSON serializer object
    '''

    def test_json_attributes(self):
        '''Attributes are serialized in JSON'''
        class MyTest(JSONObject):
            def __init__(self):
                self.a = '1'
                self.b = 2
                self.c = False
        result = MyTest().toJSON()

        # compare parsed JSON, not strings, so key order does not matter
        self.assertEqual(json.loads(result), {'a': '1', 'b': 2, 'c': False})

    def test_nested_json(self):
        '''Nested classes are serialized in JSON'''
        class MyParent(JSONObject):
            def __init__(self, children):
                self.children = children

        class MyChild(JSONObject):
            def __init__(self, color):
                self.color = color

        result = MyParent([MyChild('red'), MyChild('green')]).toJSON()

        self.assertEqual(json.loads(result), {'children': [{'color': 'red'}, {'color': 'green'}]})

    def test_nonempty_attributes(self):
        '''Only nonempty attributes are serialized'''
        class MyTest(JSONObject):
            def __init__(self):
                self.a = '1'
                self.b = False
                self.c = None
                self.d = []
        result = MyTest().toJSON()

        # only None is dropped -- falsy values like False and [] survive
        self.assertEqual(json.loads(result), {'a': '1', 'b': False, 'd': []})
stackify.handler module +""" + +import unittest +from mock import patch, Mock + +try: + import Queue as queue +except ImportError: + import queue + +from stackify.handler import StackifyHandler, StackifyListener +from stackify.application import ApiConfiguration + +import logging + + +class TestHandler(unittest.TestCase): + ''' + Test the StackifyHandler class + ''' + + def test_queue_full(self): + '''The queue should evict when full''' + q = queue.Queue(1) + handler = StackifyHandler(queue_=q, listener=Mock()) + # don't print warnings on overflow, so mute stackify logger + logging.getLogger('stackify').propagate = False + handler.enqueue('test1') + handler.enqueue('test2') + handler.enqueue('test3') + self.assertEqual(q.qsize(), 1) + self.assertEqual(q.get(), 'test3') + + +class TestListener(unittest.TestCase): + ''' + Test the StackifyListener class + ''' + + def setUp(self): + self.config = ApiConfiguration( + application = 'test_appname', + environment = 'test_environment', + api_key = 'test_apikey', + api_url = 'test_apiurl') + # don't print warnings on http crashes, so mute stackify logger + logging.getLogger('stackify').propagate = False + + @patch('stackify.handler.LogMsg') + @patch('stackify.handler.StackifyListener.send_group') + @patch('stackify.handler.HTTPClient.POST') + def test_not_identified(self, post, send_group, logmsg): + '''The HTTPClient identifies automatically if needed''' + listener = StackifyListener(queue_=Mock(), config=self.config) + listener.handle(Mock()) + self.assertTrue(listener.http.identified) + + @patch('stackify.handler.LogMsg') + @patch('stackify.handler.LogMsgGroup') + @patch('stackify.handler.HTTPClient.POST') + def test_send_group_if_needed(self, post, logmsggroup, logmsg): + '''The listener sends groups of messages''' + listener = StackifyListener(queue_=Mock(), max_batch=3, config=self.config) + listener.http.identified = True + + listener.handle(1) + self.assertFalse(post.called) + listener.handle(2) + 
self.assertFalse(post.called) + self.assertEqual(len(listener.messages), 2) + listener.handle(3) + self.assertTrue(post.called) + self.assertEqual(len(listener.messages), 0) + listener.handle(4) + self.assertEqual(post.call_count, 1) + self.assertEqual(len(listener.messages), 1) + + @patch('stackify.handler.LogMsg') + @patch('stackify.handler.StackifyListener.send_group') + def test_clear_queue_shutdown(self, send_group, logmsg): + '''The listener sends the leftover messages on the queue when shutting down''' + listener = StackifyListener(queue_=Mock(), max_batch=3, config=self.config) + listener.http.identified = True + listener._thread = Mock() + + listener.handle(1) + listener.handle(2) + self.assertFalse(send_group.called) + listener.stop() + self.assertTrue(send_group.called) + + @patch('stackify.handler.LogMsg') + @patch('stackify.handler.LogMsgGroup') + @patch('stackify.handler.HTTPClient.send_log_group') + def test_send_group_crash(self, send_log_group, logmsggroup, logmsg): + '''The listener drops messages after retrying''' + listener = StackifyListener(queue_=Mock(), max_batch=3, config=self.config) + listener.http.identified = True + + send_log_group.side_effect = Exception + + listener.handle(1) + listener.handle(2) + listener.handle(3) + self.assertEqual(len(listener.messages), 0) + listener.handle(4) + self.assertEqual(len(listener.messages), 1) + self.assertEqual(send_log_group.call_count, 1) + + +if __name__=='__main__': + unittest.main() + diff --git a/tests/test_http.py b/tests/test_http.py new file mode 100644 index 0000000..225e8c9 --- /dev/null +++ b/tests/test_http.py @@ -0,0 +1,171 @@ +""" +Test the stackify.http module +""" + +import unittest +from mock import patch, Mock +import imp + +import retrying +import stackify.http + +from stackify.log import LogMsgGroup +from stackify.application import ApiConfiguration +from stackify import READ_TIMEOUT + +old_retry = retrying.retry + +def fake_retry_decorator(retries): + def fake_retry(*args, 
**kwargs): + kwargs['wait_exponential_max'] = 0 # no delay between retries + kwargs['stop_max_attempt_number'] = retries + def inner(func): + return old_retry(*args, **kwargs)(func) + return inner + return fake_retry + + +class TestClient(unittest.TestCase): + ''' + Test the HTTP Client and associated utilities + ''' + + @classmethod + def setUpClass(cls): + cls.FAKE_RETRIES = 3 + retrying.retry = fake_retry_decorator(cls.FAKE_RETRIES) + imp.reload(stackify.http) + + @classmethod + def tearDownClass(cls): + imp.reload(retrying) + imp.reload(stackify.http) + + def setUp(self): + self.config = ApiConfiguration( + application = 'test_appname', + environment = 'test_environment', + api_key = 'test_apikey', + api_url = 'test_apiurl') + + self.client = stackify.http.HTTPClient(self.config) + + def test_logger_no_config(self): + '''GZIP encoder works''' + correct = list(b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xf3H\xcd\xc9\xc9\xd7Q(\xcf/\xcaIQ\x04\x00\xe6\xc6\xe6\xeb\r\x00\x00\x00') + gzipped = list(stackify.http.gzip_compress('Hello, world!')) + gzipped[4:8] = b'\x00\x00\x00\x00' # blank the mtime + self.assertEqual(gzipped, correct) + + def test_identify_retrying(self): + '''HTTP identify should retry''' + client = self.client + + class CustomException(Exception): pass + crash = Mock(side_effect=CustomException) + + with patch.object(client, 'POST', crash): + with self.assertRaises(CustomException): + client.identify_application() + self.assertEqual(crash.call_count, self.FAKE_RETRIES) + + def test_identify(self): + '''HTTP identify should save results''' + client = self.client + self.assertFalse(client.identified) + + result = { + 'AppNameID': '1', + 'AppEnvID': '2', + 'DeviceID': '3', + 'DeviceAppID': '4', + 'DeviceAlias': '5', + } + post = Mock(return_value=result) + + with patch.object(client, 'POST', post): + client.identify_application() + + self.assertEqual(client.app_name_id, '1') + self.assertEqual(client.app_env_id, '2') + self.assertEqual(client.device_id, 
'3') + self.assertEqual(client.device_app_id, '4') + self.assertEqual(client.device_alias, '5') + self.assertTrue(client.identified) + + def test_send_log_group_retrying(self): + '''HTTP sending log groups should retry''' + client = self.client + + class CustomException(Exception): pass + crash = Mock(side_effect=CustomException) + + group = LogMsgGroup([]) + + with patch.object(client, 'POST', crash): + with self.assertRaises(CustomException): + client.send_log_group(group) + self.assertEqual(crash.call_count, self.FAKE_RETRIES) + + def test_send_log_group(self): + '''Send log group fills out info and submits ok''' + client = self.client + client.identified = True + + client.device_id = 'test_d_id' + client.device_app_id = 'test_dapp_id' + client.app_name_id = 'test_name_id' + client.device_alias = 'test_alias' + + group = LogMsgGroup([]) + + with patch.object(client, 'POST') as post: + client.send_log_group(group) + self.assertTrue(post.called) + + self.assertEqual(group.CDID, client.device_id) + self.assertEqual(group.CDAppID, client.device_app_id) + self.assertEqual(group.AppNameID, client.app_name_id) + self.assertEqual(group.ServerName, client.device_alias) + + + @patch('requests.post') + def test_post_arguments(self, post): + '''HTTP post has correct headers''' + client = self.client + payload = Mock() + + client.POST('url', payload) + + headers = { + 'Content-Type': 'application/json', + 'X-Stackify-Key': self.config.api_key, + 'X-Stackify-PV': 'V1', + } + + self.assertTrue(post.called) + args, kwargs = post.call_args + self.assertEquals(kwargs['headers'], headers) + self.assertEquals(kwargs['timeout'], READ_TIMEOUT) + self.assertEquals(kwargs['data'], payload.toJSON()) + + @patch('requests.post') + def test_post_gzip(self, post): + '''HTTP post uses gzip if requested''' + client = self.client + payload = Mock() + payload.toJSON = Mock(return_value='1') + gzip = Mock(side_effect=lambda x: x + '_gzipped') + + with patch.object(stackify.http, 'gzip_compress', 
gzip): + client.POST('url', payload, use_gzip=True) + + self.assertTrue(post.called) + args, kwargs = post.call_args + self.assertEquals(kwargs['headers']['Content-Encoding'], 'gzip') + self.assertEquals(kwargs['data'], '1_gzipped') + + +if __name__=='__main__': + unittest.main() + diff --git a/tests/test_init.py b/tests/test_init.py new file mode 100644 index 0000000..3a24a8a --- /dev/null +++ b/tests/test_init.py @@ -0,0 +1,117 @@ +""" +Test the stackify.__init__ setup functions +""" + +import unittest +from mock import patch, Mock +from .bases import ClearEnvTest + +import os +import atexit + +import stackify +import logging + + +class TestInit(ClearEnvTest): + ''' + Test the logger init functionality + ''' + + def setUp(self): + super(TestInit, self).setUp() + self.config = stackify.ApiConfiguration( + application = 'test_appname', + environment = 'test_environment', + api_key = 'test_apikey', + api_url = 'test_apiurl') + self.loggers = [] + + def tearDown(self): + super(TestInit, self).tearDown() + global_loggers = logging.Logger.manager.loggerDict + for logger in self.loggers: + del global_loggers[logger.name] + + def test_logger_no_config(self): + '''Logger API config loads from the environment automatically''' + env_map = { + 'STACKIFY_APPLICATION': 'test2_appname', + 'STACKIFY_ENVIRONMENT': 'test2_environment', + 'STACKIFY_API_KEY': 'test2_apikey', + 'STACKIFY_API_URL': 'test2_apiurl', + } + + with patch.dict('os.environ', env_map): + logger = stackify.getLogger(auto_shutdown=False) + self.loggers.append(logger) + + config = logger.handlers[0].listener.http.api_config + + self.assertEqual(config.application, 'test2_appname') + self.assertEqual(config.environment, 'test2_environment') + self.assertEqual(config.api_key, 'test2_apikey') + self.assertEqual(config.api_url, 'test2_apiurl') + + def test_logger_api_config(self): + '''Logger API config loads from the specified config objects''' + logger = stackify.getLogger(config=self.config, auto_shutdown=False) 
+ self.loggers.append(logger) + + config = logger.handlers[0].listener.http.api_config + + self.assertEqual(config.application, 'test_appname') + self.assertEqual(config.environment, 'test_environment') + self.assertEqual(config.api_key, 'test_apikey') + self.assertEqual(config.api_url, 'test_apiurl') + + def test_logger_name(self): + '''The automatic logger name is the current module''' + self.assertEqual(stackify.getCallerName(), 'tests.test_init') + + def test_get_logger_defaults(self): + '''The logger has sane defaults''' + env_map = { + 'STACKIFY_APPLICATION': 'test2_appname', + 'STACKIFY_ENVIRONMENT': 'test2_environment', + 'STACKIFY_API_KEY': 'test2_apikey', + } + + with patch.dict('os.environ', env_map): + logger = stackify.getLogger(auto_shutdown=False) + self.loggers.append(logger) + + handler = logger.handlers[0] + config = handler.listener.http.api_config + + self.assertEqual(logger.name, 'tests.test_init') + self.assertEqual(config.api_url, stackify.API_URL) + self.assertEqual(handler.listener.max_batch, stackify.MAX_BATCH) + self.assertEqual(handler.queue.maxsize, stackify.QUEUE_SIZE) + # nose will goof with the following assert + #self.assertEqual(logger.getEffectiveLevel(), logging.WARNING) + + def test_get_logger_reuse(self): + '''Grabbing a logger twice results in the same logger''' + logger = stackify.getLogger(config=self.config, auto_shutdown=False) + self.loggers.append(logger) + logger_two = stackify.getLogger(config=self.config, auto_shutdown=False) + self.assertIs(logger_two, logger) + + @patch('atexit.register') + def test_logger_atexit(self, func): + '''Logger registers an atexit function to clean up''' + logger = stackify.getLogger(config=self.config) + self.loggers.append(logger) + func.assert_called_with(stackify.stopLogging, logger) + + def test_get_handlers(self): + '''Registered handlers are provided by getHandlers''' + logger = stackify.getLogger(config=self.config, auto_shutdown=False) + self.loggers.append(logger) + 
self.assertEqual(logger.handlers, stackify.getHandlers(logger)) + + +if __name__=='__main__': + unittest.main() + diff --git a/tests/test_log.py b/tests/test_log.py new file mode 100644 index 0000000..32f20a6 --- /dev/null +++ b/tests/test_log.py @@ -0,0 +1,66 @@ +""" +Test the stackify.log module +""" + +import unittest +from mock import patch, Mock +import json +import sys + +import stackify.log + +from stackify.log import LogMsg +import logging +import json +import time +#logging.LogRecord('name','level','pathname','lineno','msg','args','exc_info','func') + + +class TestLogPopulate(unittest.TestCase): + ''' + Test populating log objects with data + ''' + + def test_record_to_error(self): + '''LogMsgs can load logger records''' + record = logging.LogRecord('name',logging.WARNING,'pathname',32, + 'message %s',('here'),(),'func') + record.my_extra = [1,2,3] + msg = LogMsg() + msg.from_record(record) + + curr_ms = time.time() * 1000 + + self.assertEqual(msg.SrcMethod, 'func') + self.assertEqual(msg.SrcLine, 32) + self.assertEqual(msg.Th, 'MainThread') + self.assertEqual(msg.Msg, 'message here') + self.assertTrue(msg.EpochMs <= curr_ms) + self.assertEqual(json.loads(msg.data), {'my_extra':[1,2,3]}) + + def test_record_exception(self): + '''LogMsgs can parse exception information''' + class CustomException(Exception): + def __str__(self): + return 'My custom exception' + + try: + raise CustomException() + except: + record = logging.LogRecord('my exception',logging.WARNING,'somepath',12, + 'a thing happened',(),sys.exc_info()) + + msg = LogMsg() + msg.from_record(record) + + self.assertEqual(msg.Ex.OccurredEpochMillis, msg.EpochMs) + stack = msg.Ex.Error.StackTrace[0] + self.assertTrue(stack.CodeFileName.endswith('test_log.py')) + self.assertEqual(msg.Ex.Error.Message, 'My custom exception') + self.assertEqual(msg.Ex.Error.ErrorType, 'CustomException') + self.assertEqual(msg.Ex.Error.SourceMethod, 'test_record_exception') + + +if __name__=='__main__': + unittest.main() 
+