Added pytest_coverage - a wrapper around the coverage library for code coverage metrics of your py.test runs

Added pytest_xmlresult - xml output of test results, compatible with CruiseControl
1 parent fb33f28 commit 8d55ce89e95f22c75229da00ad2865361546dc89 @rozza committed Sep 30, 2009
Showing with 308 additions and 0 deletions.
  1. +12 −0 README
  2. +89 −0 pytest_coverage.py
  3. +207 −0 pytest_xmlresult.py
12 README
@@ -0,0 +1,12 @@
+===============
+py.test plugins
+===============
+
+Some plugins to share:
+
+* pytest_coverage
+ Uses the coverage library to measure code coverage metrics.
+ The html report for coverage is especially nice.
+
+* pytest_xmlresult
+ Provides XML results for your tests, so you can use them with CruiseControl.
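
For reference, with the options added by the two plugins below, invocation might look like this (illustrative commands only, assuming the plugins are importable by py.test):

    py.test --cov-report=html --cov-directory=coverage
    py.test --cov-report=report --cov-show-missing=1 --cov-omit=omit.txt
    py.test --xmlresult=result.xml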
89 pytest_coverage.py
@@ -0,0 +1,89 @@
+"""
+Write and report coverage data with 'coverage.py'.
+
+Requires Ned Batchelder's excellent coverage:
+http://nedbatchelder.com/code/coverage/
+"""
+import py
+
+coverage = py.test.importorskip("coverage")
+cov = coverage.coverage()
+
+def pytest_addoption(parser):
+    group = parser.addgroup('Coverage options')
+    group.addoption('--cov-show-missing', action='store', default=None,
+        dest='show_missing',
+        help='Show line numbers of statements that were not executed')
+    group.addoption('--cov-report', action='store', default=None,
+        dest='report', type="choice", choices=['report', 'annotate', 'html'],
+        help="""
+        report: output a plain-text coverage report,
+        annotate: annotate your source code to show which lines were executed and which were not,
+        html: write an html coverage report
+        """.strip())
+    group.addoption('--cov-directory', action='store', default=None,
+        dest='directory',
+        help='Directory for the reports (html / annotate results), defaults to ./coverage')
+    group.addoption('--cov-ignore-errors', action='store', default=None,
+        dest='ignore_errors',
+        help='Ignore errors while producing the report')
+    group.addoption('--cov-omit', action='store', default=None,
+        dest='omit',
+        help='Path to a file listing filename prefixes to omit from the report')
+
+def pytest_configure(config):
+    cov.use_cache(0)  # Do not cache any of the coverage.py stuff
+    cov.start()
+
+def pytest_terminal_summary(terminalreporter):
+    config = terminalreporter.config
+    tw = terminalreporter._tw
+    tw.sep('-', 'coverage')
+    tw.line('Processing Coverage...')
+    cov.stop()
+    cov.save()
+
+    # Read the configured options
+    show_missing = config.getvalue('show_missing')
+    omit = config.getvalue('omit')
+    report = config.getvalue('report') or 'report'
+    directory = config.getvalue('directory') or 'coverage'
+
+    # Set up the report_args shared by all report types
+    report_args = {
+        'morfs': [],
+        'ignore_errors': config.getvalue('ignore_errors'),
+    }
+
+    # Handle any omit prefixes listed in the omit file
+    if omit:
+        try:
+            omit_file = py.path.local(omit)
+            omit_prefixes = [line.strip() for line in omit_file.readlines()]
+            report_args['omit_prefixes'] = omit_prefixes
+        except Exception:
+            pass  # a missing or unreadable omit file is silently ignored
+
+    if report == 'report':
+        cov.report(show_missing=show_missing, **report_args)
+    if report == 'annotate':
+        cov.annotate(directory=directory, **report_args)
+    if report == 'html':
+        cov.html_report(directory=directory, **report_args)
+
+def test_functional(testdir):
+    py.test.importorskip("coverage")
+    testdir.plugins.append("coverage")
+    testdir.makepyfile("""
+        def f():
+            x = 42
+        def test_whatever():
+            pass
+        """)
+    result = testdir.runpytest()
+    assert result.ret == 0
+    assert result.stdout.fnmatch_lines([
+        '*Processing Coverage*'
+    ])
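
Note: for context, the coverage.py calls wrapped above can also be driven directly. A minimal sketch using only the API already used by the plugin (coverage.coverage(), use_cache, start, stop, save, report, html_report); the measured code and the output directory are illustrative:

    import coverage

    cov = coverage.coverage()
    cov.use_cache(0)   # do not cache coverage data, as in pytest_configure above
    cov.start()

    # ... import and exercise the code to be measured here ...

    cov.stop()
    cov.save()
    cov.report(show_missing=True)          # plain-text report, as --cov-report=report
    cov.html_report(directory='coverage')  # html report, as --cov-report=html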
207 pytest_xmlresult.py
@@ -0,0 +1,207 @@
+"""
+ xmlresult plugin for machine-readable logging of test results.
+ Useful for cruisecontrol integration code.
+
+ An adaptation of pytest_resultlog.py
+"""
+
+import time
+
+def pytest_addoption(parser):
+    group = parser.addgroup("xmlresult", "xmlresult plugin options")
+    group.addoption('--xmlresult', action="store", dest="xmlresult",
+        metavar="path", default=None,
+        help="path for machine-readable xml result log.")
+
+def pytest_configure(config):
+    xmlresult = config.option.xmlresult
+    if xmlresult:
+        logfile = open(xmlresult, 'w', 1)  # line buffered
+        config._xmlresult = XMLResult(logfile)
+        config.pluginmanager.register(config._xmlresult)
+
+def pytest_unconfigure(config):
+    xmlresult = getattr(config, '_xmlresult', None)
+    if xmlresult:
+        xmlresult.logfile.close()
+        del config._xmlresult
+        config.pluginmanager.unregister(xmlresult)
+
+def generic_path(item):
+    chain = item.listchain()
+    gpath = [chain[0].name]
+    fspath = chain[0].fspath
+    fspart = False
+    for node in chain[1:]:
+        newfspath = node.fspath
+        if newfspath == fspath:
+            if fspart:
+                gpath.append(':')
+                fspart = False
+            else:
+                gpath.append('.')
+        else:
+            gpath.append('/')
+            fspart = True
+        name = node.name
+        if name[0] in '([':
+            gpath.pop()
+        gpath.append(name)
+        fspath = newfspath
+    return ''.join(gpath)
+
+class XMLResult(object):
+    test_start_time = 0.0
+    test_taken_time = 0.0
+    test_count = 0
+    error_count = 0
+    failure_count = 0
+    skip_count = 0
+
+    class _StringStream:
+        def __init__(self):
+            self.reset()
+
+        def write(self, string):
+            self._string += string
+
+        def read(self):
+            return self._string
+
+        def reset(self):
+            self._string = ""
+
+    def __init__(self, logfile):
+        self.logfile = logfile
+        self.test_logs = self._StringStream()
+
+    def write_log_entry(self, testpath, shortrepr, longrepr):
+        self.test_count += 1
+        # Create an xml log entry for the test
+        self.test_logs.write('<testcase test_method="%s" name="%s" time="%.3f">' % (
+            testpath.split(':')[-1], testpath, self.test_taken_time))
+
+        # Do we have any other data to capture for Errors, Fails and Skips?
+        if shortrepr in ['E', 'F', 'S']:
+            if shortrepr == 'E':
+                self.error_count += 1
+            elif shortrepr == 'F':
+                self.failure_count += 1
+            elif shortrepr == 'S':
+                self.skip_count += 1
+
+            tag_map = {'E': 'error', 'F': 'failure', 'S': 'skipped'}
+            self.test_logs.write("<%s>" % tag_map[shortrepr])
+
+            # Output any more information inside CDATA sections
+            for line in longrepr.splitlines():
+                self.test_logs.write("<![CDATA[%s\n]]>" % line)
+            self.test_logs.write("</%s>" % tag_map[shortrepr])
+        self.test_logs.write("</testcase>")
+
+    def log_outcome(self, node, shortrepr, longrepr):
+        testpath = generic_path(node)
+        self.write_log_entry(testpath, shortrepr, longrepr)
+
+    def pytest_runtest_logreport(self, report):
+        code = report.shortrepr
+        if report.passed:
+            longrepr = ""
+            code = "."
+        elif report.failed:
+            longrepr = str(report.longrepr)
+            code = "F"
+        elif report.skipped:
+            code = "S"
+            longrepr = str(report.longrepr.reprcrash.message)
+        self.log_outcome(report.item, code, longrepr)
+
+    def pytest_runtest_setup(self, item):
+        self.test_start_time = time.time()
+
+    def pytest_runtest_teardown(self, item):
+        self.test_taken_time = time.time() - self.test_start_time
+
+    def pytest_collectreport(self, report):
+        if not report.passed:
+            if report.failed:
+                code = "F"
+            else:
+                assert report.skipped
+                code = "S"
+            longrepr = str(report.longrepr.reprcrash)
+            self.log_outcome(report.collector, code, longrepr)
+
+    def pytest_internalerror(self, excrepr):
+        path = excrepr.reprcrash.path
+        self.error_count += 1
+        self.write_log_entry(path, '!', str(excrepr))
+
+    def pytest_sessionstart(self, session):
+        self.suite_start_time = time.time()
+
+    def pytest_sessionfinish(self, session, exitstatus):
+        """Write the xml output."""
+        suite_stop_time = time.time()
+        suite_time_delta = suite_stop_time - self.suite_start_time
+        self.logfile.write('<testsuite ')
+        self.logfile.write('errors="%i" ' % self.error_count)
+        self.logfile.write('failures="%i" ' % self.failure_count)
+        self.logfile.write('skips="%i" ' % self.skip_count)
+        self.logfile.write('name="" ')
+        self.logfile.write('tests="%i" ' % self.test_count)
+        self.logfile.write('time="%.3f"' % suite_time_delta)
+        self.logfile.write(' >')
+        self.logfile.write(self.test_logs.read())
+        self.logfile.write('</testsuite>')
+        self.logfile.close()
+
+
+# Tests
+def test_generic(testdir, LineMatcher):
+    testdir.plugins.append("xmlresult")
+    testdir.makepyfile("""
+        import py
+        def test_pass():
+            pass
+        def test_fail():
+            assert 0
+        def test_skip():
+            py.test.skip("")
+        """)
+    testdir.runpytest("--xmlresult=result.xml")
+    lines = testdir.tmpdir.join("result.xml").readlines(cr=0)
+    print(lines)
+    LineMatcher(lines).fnmatch_lines([
+        '*testsuite errors="0" failures="1" skips="1" name="" tests="3"*'
+    ])
+    LineMatcher(lines).fnmatch_lines([
+        '*<failure><![CDATA[def test_fail():*'
+    ])
+    LineMatcher(lines).fnmatch_lines([
+        '*<skipped><![CDATA[Skipped: <Skipped instance>*'
+    ])
+
+def test_generic_path():
+    from py.__.test.collect import Node, Item, FSCollector
+    p1 = Node('a')
+    assert p1.fspath is None
+    p2 = Node('B', parent=p1)
+    p3 = Node('()', parent=p2)
+    item = Item('c', parent=p3)
+
+    res = generic_path(item)
+    assert res == 'a.B().c'
+
+    p0 = FSCollector('proj/test')
+    p1 = FSCollector('proj/test/a', parent=p0)
+    p2 = Node('B', parent=p1)
+    p3 = Node('()', parent=p2)
+    p4 = Node('c', parent=p3)
+    item = Item('[1]', parent=p4)
+
+    res = generic_path(item)
+    assert res == 'test/a:B().c[1]'
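
For reference, the log written by this plugin has roughly the following shape (an illustrative sample based on the writes above, not captured from a real run; test names and timings are made up, and the plugin emits no newlines outside the CDATA sections, so the real file is essentially one long line, wrapped here for readability):

    <testsuite errors="0" failures="1" skips="0" name="" tests="2" time="0.012" >
    <testcase test_method="test_pass" name="test_module.py:test_pass" time="0.001"></testcase>
    <testcase test_method="test_fail" name="test_module.py:test_fail" time="0.002"><failure>
    <![CDATA[def test_fail():
    ]]><![CDATA[    assert 0
    ]]></failure></testcase>
    </testsuite>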
