Skip to content

Commit

Permalink
Taking a first stab at metadata tagging of tests
Browse files Browse the repository at this point in the history
  • Loading branch information
jmvrbanac committed Nov 24, 2013
1 parent 521999e commit 36c3e0b
Show file tree
Hide file tree
Showing 9 changed files with 132 additions and 25 deletions.
19 changes: 10 additions & 9 deletions docs/using/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -71,12 +71,13 @@ Command-line Arguments
------------------------
Specter is a spec-based testing library to help facilitate BDD in Python.

================ ============
Argument Description
================ ============
-h, --help Show console help
--search PATH Specifies the search path for spec files
--no-art Disables the ASCII art on the runner
--coverage Enables coverage.py integration. Configure using .coveragerc
--select-module Selects a module path to run. Ex: spec.sample.TestClass
================ ============
===================== ============
Argument Description
===================== ============
-h, --help Show console help
--search PATH Specifies the search path for spec files
--no-art Disables the ASCII art on the runner
--coverage Enables coverage.py integration. Configure using .coveragerc
--select-module Selects a module path to run. Ex: spec.sample.TestClass
--select-by-metadata Selects tests to run by specifying a list of key=value pairs
===================== ============
5 changes: 5 additions & 0 deletions docs/writing_tests/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -203,3 +203,8 @@ Specter provided a few different ways of skipping tests.
.. autofunction:: specter.expect.incomplete()


Adding Metadata to Tests
~~~~~~~~~~~~~~~~~~~~~~~~~
Specter allows you to tag tests with metadata. The primary purpose of metadata is to carry miscellaneous information along with your tests. In the future, Specter will be able to output this information for consumption and processing. Currently, metadata can be used to select which tests you want to run.

.. autofunction:: specter.expect.metadata
19 changes: 19 additions & 0 deletions specter/expect.py
Original file line number Diff line number Diff line change
Expand Up @@ -171,3 +171,22 @@ def it_should_do_something(self):
def skip_wrapper(*args, **kwargs):
raise TestIncompleteException(test_func, _('Test is incomplete'))
return skip_wrapper


def metadata(**key_value_pairs):
    """ The metadata decorator allows you to tag specific tests with
    key/value data for run-time processing or reporting. A common use
    case is to tag a test as a positive or negative test type.

    .. code-block:: python

        # Example of using the metadata decorator
        @metadata(type='negative')
        def it_shouldnt_do_something(self):
            pass
    """
    def decorator(func):
        # NOTE: the wrapper's name must remain 'onCall' — the runner's
        # extract_metadata() identifies decorated tests by that name.
        def onCall(*args, **kwargs):
            return func, key_value_pairs
        return onCall
    return decorator
12 changes: 11 additions & 1 deletion specter/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,11 @@ def setup_argparse(self):
help=_('Selects a module path to run. '
'Ex: spec.sample.TestClass'),
default=None)
self.arg_parser.add_argument(
'--select-by-metadata', dest='select_meta',
help=_('Selects tests to run by specifying a list of '
'key=value pairs you wish to run'),
default=[], nargs='*')

def generate_ascii_art(self):
tag_line = _('Keeping the boogy man away from your code!')
Expand All @@ -50,8 +55,13 @@ def generate_ascii_art(self):
return ascii_art

def run(self, args):
select_meta = None
self.arguments = self.arg_parser.parse_args(args)

if self.arguments.select_meta:
metas = [meta.split('=') for meta in self.arguments.select_meta]
select_meta = {meta[0]: meta[1].strip('"\'') for meta in metas}

if not self.arguments.no_art:
print(self.generate_ascii_art())

Expand All @@ -76,7 +86,7 @@ def run(self, args):

suite = suite_type()
self.collector.add_describe(suite)
suite.execute()
suite.execute(select_metadata=select_meta)

# Start Coverage Capture
if self.coverage:
Expand Down
43 changes: 32 additions & 11 deletions specter/spec.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
import inspect
import itertools
import sys
from random import shuffle

from time import time
from types import FunctionType, MethodType
from pyevents.manager import EventDispatcher
from pyevents.event import Event
from specter.util import get_real_last_traceback, convert_camelcase
from specter.util import (get_real_last_traceback, convert_camelcase,
find_by_metadata, extract_metadata,
children_with_tests_with_metadata)


class TimedObject(object):
Expand All @@ -28,7 +30,7 @@ def elapsed_time(self):


class CaseWrapper(TimedObject):
def __init__(self, case_func, parent, execute_kwargs=None):
def __init__(self, case_func, parent, execute_kwargs=None, metadata={}):
super(CaseWrapper, self).__init__()
self.case_func = case_func
self.expects = []
Expand All @@ -38,6 +40,7 @@ def __init__(self, case_func, parent, execute_kwargs=None):
self.incomplete = False
self.skip_reason = None
self.execute_kwargs = execute_kwargs
self.metadata = metadata

def execute(self, context=None):
kwargs = {}
Expand Down Expand Up @@ -82,8 +85,7 @@ class Describe(EventDispatcher):
def __init__(self, parent=None):
super(Describe, self).__init__()
self.parent = parent
self.cases = [CaseWrapper(case_func, parent=self)
for case_func in self.case_funcs]
self.cases = self.__wrappers__
self.describes = [desc_type(parent=self)
for desc_type in self.describe_types]

Expand All @@ -95,6 +97,15 @@ def name(self):
def doc(self):
return type(self).__doc__

    @property
    def __wrappers__(self):
        """Build a ``CaseWrapper`` for every test function on this describe.

        Each function is first passed through ``extract_metadata`` so that
        tests tagged with the ``metadata`` decorator are unwrapped and their
        key/value pairs are carried on the wrapper's ``metadata`` attribute.
        """
        wrappers = []
        for case_func in self.case_funcs:
            # extract_metadata returns (real_func, metadata_dict); the
            # dict is {} for undecorated tests.
            case_func, metadata = extract_metadata(case_func)
            wrappers.append(CaseWrapper(case_func, parent=self,
                                        metadata=metadata))
        return wrappers

@classmethod
def __cls_members__(cls):
all_members = {}
Expand Down Expand Up @@ -157,7 +168,13 @@ def before_each(self):
def after_each(self):
pass

def execute(self):
def execute(self, select_metadata=None):

if select_metadata:
self.cases = find_by_metadata(select_metadata, self.cases)
self.describes = children_with_tests_with_metadata(
select_metadata, self)

# If it doesn't have tests or describes don't run it
if len(self.cases) <= 0 and len(self.describes) <= 0:
return
Expand All @@ -176,7 +193,7 @@ def execute(self):

# Execute Suites
for describe in self.describes:
describe.execute()
describe.execute(select_metadata=select_metadata)

self.after_all()
self.top_parent.dispatch(DescribeEvent(DescribeEvent.COMPLETE, self))
Expand Down Expand Up @@ -222,21 +239,25 @@ def __init__(self, parent=None):
# Generate new functions and monkey-patch
for case_func in self.case_funcs:
for name, args in self.DATASET.items():
func_name = '{0}_{1}'.format(case_func.__name__, name)
new_func = copy_function(case_func, func_name)
kwargs = get_function_kwargs(case_func, args)
extracted_func, metadata = extract_metadata(case_func)

func_name = '{0}_{1}'.format(extracted_func.__name__, name)
new_func = copy_function(extracted_func, func_name)
kwargs = get_function_kwargs(extracted_func, args)

# Monkey-patch and add to cases list
setattr(self, func_name, new_func)
self.cases.append(CaseWrapper(new_func, parent=self,
execute_kwargs=kwargs))
execute_kwargs=kwargs,
metadata=metadata))


def fixture(cls):
    """Class decorator that flags *cls* as a fixture.

    Sets the ``__FIXTURE__`` attribute to ``True`` and returns the class
    unchanged so it can be used as a plain decorator.
    """
    cls.__FIXTURE__ = True
    return cls


def copy_function(func, name):
py3 = (3, 0, 0)
code = (func.func_code
Expand Down
32 changes: 32 additions & 0 deletions specter/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,3 +85,35 @@ def get_real_last_traceback(exception):
traced_lines.append(' - Error: {0}'.format(exception))

return traced_lines


def find_by_metadata(meta, cases):
    """Return the test cases whose metadata matches *meta*.

    A case is selected when at least one key/value pair in *meta* equals
    the same pair in the case's ``metadata`` dict.

    :param meta: dict of key/value selectors, e.g. ``{'test': 'smoke'}``.
    :param cases: iterable of wrapper objects exposing a ``metadata`` dict.
    :returns: list of matching cases, each appearing at most once.
    """
    selected_cases = []
    for case in cases:
        matched_keys = set(meta.keys()) & set(case.metadata.keys())

        # Append each case at most once: the previous per-key loop
        # appended a case N times when N key/value pairs matched,
        # producing duplicate entries in the selected list.
        if any(meta.get(key) == case.metadata.get(key)
               for key in matched_keys):
            selected_cases.append(case)

    return selected_cases


def children_with_tests_with_metadata(meta, describe):
    """Recursively collect child describes containing tests that match *meta*.

    Walks ``describe.describes`` depth-first and returns a flat list of
    every descendant describe that has at least one case whose metadata
    matches *meta* (see ``find_by_metadata``). The *describe* argument
    itself is never included — only its descendants.
    """
    matching = []
    for child in describe.describes:
        if find_by_metadata(meta, child.cases):
            matching.append(child)
        # Always recurse: a child without direct matches may still have
        # matching grandchildren.
        matching.extend(children_with_tests_with_metadata(meta, child))
    return matching


def extract_metadata(case_func):
    """Unwrap a test function decorated with the ``metadata`` decorator.

    The decorator replaces a test with a closure named ``onCall`` which,
    when invoked, returns ``(original_func, metadata_dict)``. Undecorated
    functions are returned as-is with an empty dict.

    NOTE(review): detection is name-based — any function whose ``__name__``
    contains ``'onCall'`` is treated as decorated and will be called with
    no arguments; confirm no plain test is ever named that way.

    :returns: tuple ``(case_func, metadata)``.
    """
    meta = {}
    if 'onCall' in case_func.__name__:
        case_func, meta = case_func()
    return case_func, meta
6 changes: 5 additions & 1 deletion tests/example_data/example.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from specter.spec import Spec, DataDescribe
from specter.expect import expect, require, skip_if, incomplete
from specter.expect import expect, require, skip_if, incomplete, metadata


class TestObj(object):
Expand All @@ -23,6 +23,10 @@ def a_skipped_test(self):
def an_incomplete_test(self):
expect('this should never be called').to.equal(None)

@metadata(test='smoke')
def a_test_with_metadata(self):
expect(True).to.be_true()

def causing_a_traceback(self):
expect(Nope).to.be_none() # NOQA

Expand Down
10 changes: 8 additions & 2 deletions tests/test_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,14 @@ def test_run(self):
self.runner.run(args=['--search', './tests/example_data', '--no-art'])
self.assertEqual(len(self.runner.suite_types), 4)
self.assertEqual(self.runner.collector.skipped_tests, 1)
self.assertEqual(self.runner.collector.test_total, 10)
self.assertEqual(self.runner.collector.test_total, 11)

def test_run_w_coverage(self):
self.runner.run(args=['--search', './tests/example_data', '--no-art',
'--coverage'])
self.assertEqual(len(self.runner.suite_types), 4)
self.assertEqual(self.runner.collector.skipped_tests, 1)
self.assertEqual(self.runner.collector.test_total, 10)
self.assertEqual(self.runner.collector.test_total, 11)

def test_run_w_bad_path(self):
self.runner.run(args=['--search', './cobble'])
Expand All @@ -38,3 +38,9 @@ def test_run_w_select_module(self):
self.assertEqual(len(self.runner.suite_types), 1)
self.assertEqual(self.runner.collector.skipped_tests, 0)
self.assertEqual(self.runner.collector.test_total, 2)

    def test_run_w_select_by_metadata(self):
        # All four suites are still discovered, but only the single
        # example test tagged @metadata(test='smoke') should be
        # collected when --select-by-metadata is given. The quotes in
        # 'test="smoke"' exercise the runner's strip('"\'') handling.
        self.runner.run(args=['--search', './tests/example_data', '--no-art',
                              '--select-by-metadata', 'test="smoke"'])
        self.assertEqual(len(self.runner.suite_types), 4)
        self.assertEqual(self.runner.collector.test_total, 1)
11 changes: 10 additions & 1 deletion tests/test_util.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from unittest import TestCase

from specter import util
from specter import util, spec


class TestSpecterUtil(TestCase):
Expand All @@ -16,3 +16,12 @@ def test_convert_camelcase_error(self):
def test_get_numbered_source_error(self):
result = util.get_numbered_source(None, 1)
self.assertIn('Error finding traceback!', result)

    def test_find_by_metadata(self):
        # Two wrappers with differing metadata values: only the one whose
        # metadata matches the {'test': 'smoke'} selector should be found.
        wrap1 = spec.CaseWrapper(None, None, metadata={'test': 'smoke'})
        wrap2 = spec.CaseWrapper(None, None, metadata={'test': 'bam'})

        test_list = [wrap1, wrap2]
        found = util.find_by_metadata({'test': 'smoke'}, test_list)
        self.assertEqual(len(found), 1)
        self.assertIn(wrap1, found)

1 comment on commit 36c3e0b

@jmvrbanac
Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

#8

Please sign in to comment.