From 6c133a1c1467f81955fb0e2dbb420c9288d76adc Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 26 Jul 2017 15:31:27 -0400 Subject: [PATCH 1/5] Copy subunit-trace and colorizer from os-testr This commit copies over the subunit-trace and colorizer (it's only in repo dependency) from the os-testr project. [1] These modules will be used as the new default output filter from stestr, but to avoid a potential circular dependency (the long term plan is to switch os-testr to use stestr underneath) we need the subunit-trace modules to live locally. [1] http://git.openstack.org/cgit/openstack/os-testr --- stestr/colorizer.py | 98 +++++ stestr/subunit_trace.py | 403 ++++++++++++++++++ stestr/tests/sample_streams/all_skips.subunit | Bin 0 -> 2453 bytes stestr/tests/sample_streams/failure.subunit | Bin 0 -> 26667 bytes .../tests/sample_streams/successful.subunit | Bin 0 -> 12563 bytes stestr/tests/test_subunit_trace.py | 110 +++++ test-requirements.txt | 1 + 7 files changed, 612 insertions(+) create mode 100644 stestr/colorizer.py create mode 100755 stestr/subunit_trace.py create mode 100644 stestr/tests/sample_streams/all_skips.subunit create mode 100644 stestr/tests/sample_streams/failure.subunit create mode 100644 stestr/tests/sample_streams/successful.subunit create mode 100644 stestr/tests/test_subunit_trace.py diff --git a/stestr/colorizer.py b/stestr/colorizer.py new file mode 100644 index 00000000..8ecec358 --- /dev/null +++ b/stestr/colorizer.py @@ -0,0 +1,98 @@ +# Copyright 2015 NEC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Colorizer Code is borrowed from Twisted: +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +import sys + + +class AnsiColorizer(object): + """A colorizer is an object that loosely wraps around a stream + + allowing callers to write text to the stream in a particular color. + + Colorizer classes must implement C{supported()} and C{write(text, color)}. 
+ """ + _colors = dict(black=30, red=31, green=32, yellow=33, + blue=34, magenta=35, cyan=36, white=37) + + def __init__(self, stream): + self.stream = stream + + @classmethod + def supported(cls, stream=sys.stdout): + """Check the current platform supports coloring terminal output + + A class method that returns True if the current platform supports + coloring terminal output using this method. Returns False otherwise. + """ + if not stream.isatty(): + return False # auto color only on TTYs + try: + import curses + except ImportError: + return False + else: + try: + try: + return curses.tigetnum("colors") > 2 + except curses.error: + curses.setupterm() + return curses.tigetnum("colors") > 2 + except Exception: + # guess false in case of error + return False + + def write(self, text, color): + """Write the given text to the stream in the given color. + + @param text: Text to be written to the stream. + + @param color: A string label for a color. e.g. 'red', 'white'. + """ + color = self._colors[color] + self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) + + +class NullColorizer(object): + """See _AnsiColorizer docstring.""" + def __init__(self, stream): + self.stream = stream + + @classmethod + def supported(cls, stream=sys.stdout): + return True + + def write(self, text, color): + self.stream.write(text) diff --git a/stestr/subunit_trace.py b/stestr/subunit_trace.py new file mode 100755 index 00000000..6995319a --- /dev/null +++ b/stestr/subunit_trace.py @@ -0,0 +1,403 @@ +#!/usr/bin/env python + +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# Copyright 2014 Samsung Electronics +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Trace a subunit stream in reasonable detail and high accuracy.""" +from __future__ import absolute_import + +import argparse +import datetime +import functools +import os +import re +import sys + +import pbr.version +import subunit +import testtools + +from stestr import colorizer + +# NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module +# was renamed to dbm.ndbm, this block takes that into account +try: + import anydbm as dbm +except ImportError: + import dbm + +DAY_SECONDS = 60 * 60 * 24 +FAILS = [] +RESULTS = {} + + +def total_seconds(timedelta): + # NOTE(mtreinish): This method is built-in to the timedelta class in + # python >= 2.7 it is here to enable it's use on older versions + return ((timedelta.days * DAY_SECONDS + timedelta.seconds) * 10 ** 6 + + timedelta.microseconds) / 10 ** 6 + + +def cleanup_test_name(name, strip_tags=True, strip_scenarios=False): + """Clean up the test name for display. + + By default we strip out the tags in the test because they don't help us + in identifying the test that is run to it's result. + + Make it possible to strip out the testscenarios information (not to + be confused with tempest scenarios) however that's often needed to + indentify generated negative tests. 
+ """ + if strip_tags: + tags_start = name.find('[') + tags_end = name.find(']') + if tags_start > 0 and tags_end > tags_start: + newname = name[:tags_start] + newname += name[tags_end + 1:] + name = newname + + if strip_scenarios: + tags_start = name.find('(') + tags_end = name.find(')') + if tags_start > 0 and tags_end > tags_start: + newname = name[:tags_start] + newname += name[tags_end + 1:] + name = newname + + return name + + +def get_duration(timestamps): + start, end = timestamps + if not start or not end: + duration = '' + else: + delta = end - start + duration = '%d.%06ds' % ( + delta.days * DAY_SECONDS + delta.seconds, delta.microseconds) + return duration + + +def find_worker(test): + """Get the worker number. + + If there are no workers because we aren't in a concurrent environment, + assume the worker number is 0. + """ + for tag in test['tags']: + if tag.startswith('worker-'): + return int(tag[7:]) + return 0 + + +# Print out stdout/stderr if it exists, always +def print_attachments(stream, test, all_channels=False): + """Print out subunit attachments. + + Print out subunit attachments that contain content. This + runs in 2 modes, one for successes where we print out just stdout + and stderr, and an override that dumps all the attachments. + """ + channels = ('stdout', 'stderr') + for name, detail in test['details'].items(): + # NOTE(sdague): the subunit names are a little crazy, and actually + # are in the form pythonlogging:'' (with the colon and quotes) + name = name.split(':')[0] + if detail.content_type.type == 'test': + detail.content_type.type = 'text' + if (all_channels or name in channels) and detail.as_text(): + title = "Captured %s:" % name + stream.write("\n%s\n%s\n" % (title, ('~' * len(title)))) + # indent attachment lines 4 spaces to make them visually + # offset + for line in detail.as_text().split('\n'): + line = line.encode('utf8') + stream.write(" %s\n" % line) + + +def find_test_run_time_diff(test_id, run_time): + times_db_path = os.path.join(os.path.join(os.getcwd(), '.testrepository'), + 'times.dbm') + if os.path.isfile(times_db_path): + try: + test_times = dbm.open(times_db_path) + except Exception: + return False + try: + avg_runtime = float(test_times.get(str(test_id), False)) + except Exception: + try: + avg_runtime = float(test_times[str(test_id)]) + except Exception: + avg_runtime = False + + if avg_runtime and avg_runtime > 0: + run_time = float(run_time.rstrip('s')) + perc_diff = ((run_time - avg_runtime) / avg_runtime) * 100 + return perc_diff + return False + + +def show_outcome(stream, test, print_failures=False, failonly=False, + enable_diff=False, threshold='0', abbreviate=False, + enable_color=False): + global RESULTS + status = test['status'] + # TODO(sdague): ask lifeless why on this? + if status == 'exists': + return + + worker = find_worker(test) + name = cleanup_test_name(test['id']) + duration = get_duration(test['timestamps']) + + if worker not in RESULTS: + RESULTS[worker] = [] + RESULTS[worker].append(test) + + # don't count the end of the return code as a fail + if name == 'process-returncode': + return + + for color in [colorizer.AnsiColorizer, colorizer.NullColorizer]: + if not enable_color: + color = colorizer.NullColorizer(stream) + break + if color.supported(): + color = color(stream) + break + + if status == 'fail' or status == 'uxsuccess': + FAILS.append(test) + if abbreviate: + color.write('F', 'red') + else: + stream.write('{%s} %s [%s] ... 
' % ( + worker, name, duration)) + color.write('FAILED', 'red') + stream.write('\n') + if not print_failures: + print_attachments(stream, test, all_channels=True) + elif not failonly: + if status == 'success' or status == 'xfail': + if abbreviate: + color.write('.', 'green') + else: + out_string = '{%s} %s [%s' % (worker, name, duration) + perc_diff = find_test_run_time_diff(test['id'], duration) + if enable_diff: + if perc_diff and abs(perc_diff) >= abs(float(threshold)): + if perc_diff > 0: + out_string = out_string + ' +%.2f%%' % perc_diff + else: + out_string = out_string + ' %.2f%%' % perc_diff + stream.write(out_string + '] ... ') + color.write('ok', 'green') + stream.write('\n') + print_attachments(stream, test) + elif status == 'skip': + if abbreviate: + color.write('S', 'blue') + else: + reason = test['details'].get('reason', '') + if reason: + reason = ': ' + reason.as_text() + stream.write('{%s} %s ... ' % ( + worker, name)) + color.write('SKIPPED', 'blue') + stream.write('%s' % (reason)) + stream.write('\n') + else: + if abbreviate: + stream.write('%s' % test['status'][0]) + else: + stream.write('{%s} %s [%s] ... %s\n' % ( + worker, name, duration, test['status'])) + if not print_failures: + print_attachments(stream, test, all_channels=True) + + stream.flush() + + +def print_fails(stream): + """Print summary failure report. + + Currently unused, however there remains debate on inline vs. at end + reporting, so leave the utility function for later use. + """ + if not FAILS: + return + stream.write("\n==============================\n") + stream.write("Failed %s tests - output below:" % len(FAILS)) + stream.write("\n==============================\n") + for f in FAILS: + stream.write("\n%s\n" % f['id']) + stream.write("%s\n" % ('-' * len(f['id']))) + print_attachments(stream, f, all_channels=True) + stream.write('\n') + + +def count_tests(key, value): + count = 0 + for k, v in RESULTS.items(): + for item in v: + if key in item: + if re.search(value, item[key]): + count += 1 + return count + + +def run_time(): + runtime = 0.0 + for k, v in RESULTS.items(): + for test in v: + test_dur = get_duration(test['timestamps']).strip('s') + # NOTE(toabctl): get_duration() can return an empty string + # which leads to a ValueError when casting to float + if test_dur: + runtime += float(test_dur) + return runtime + + +def worker_stats(worker): + tests = RESULTS[worker] + num_tests = len(tests) + stop_time = tests[-1]['timestamps'][1] + start_time = tests[0]['timestamps'][0] + if not start_time or not stop_time: + delta = 'N/A' + else: + delta = stop_time - start_time + return num_tests, str(delta) + + +def print_summary(stream, elapsed_time): + stream.write("\n======\nTotals\n======\n") + stream.write("Ran: %s tests in %.4f sec.\n" % ( + count_tests('status', '.*'), total_seconds(elapsed_time))) + stream.write(" - Passed: %s\n" % count_tests('status', '^success$')) + stream.write(" - Skipped: %s\n" % count_tests('status', '^skip$')) + stream.write(" - Expected Fail: %s\n" % count_tests('status', '^xfail$')) + stream.write(" - Unexpected Success: %s\n" % count_tests('status', + '^uxsuccess$')) + stream.write(" - Failed: %s\n" % count_tests('status', '^fail$')) + stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time()) + + # we could have no results, especially as we filter out the process-codes + if RESULTS: + stream.write("\n==============\nWorker Balance\n==============\n") + + for w in range(max(RESULTS.keys()) + 1): + if w not in RESULTS: + stream.write( + " - WARNING: 
missing Worker %s! " + "Race in testr accounting.\n" % w) + else: + num, time = worker_stats(w) + out_str = " - Worker %s (%s tests) => %s" % (w, num, time) + if time.isdigit(): + out_str += 's' + out_str += '\n' + stream.write(out_str) + + +__version__ = pbr.version.VersionInfo('stestr').version_string() + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--version', action='version', + version='%s' % __version__) + parser.add_argument('--no-failure-debug', '-n', action='store_true', + dest='print_failures', help='Disable printing failure ' + 'debug information in realtime') + parser.add_argument('--fails', '-f', action='store_true', + dest='post_fails', help='Print failure debug ' + 'information after the stream is proccesed') + parser.add_argument('--failonly', action='store_true', + dest='failonly', help="Don't print success items", + default=( + os.environ.get('TRACE_FAILONLY', False) + is not False)) + parser.add_argument('--abbreviate', '-a', action='store_true', + dest='abbreviate', help='Print one character status' + 'for each test') + parser.add_argument('--perc-diff', '-d', action='store_true', + dest='enable_diff', + help="Print percent change in run time on each test ") + parser.add_argument('--diff-threshold', '-t', dest='threshold', + help="Threshold to use for displaying percent change " + "from the avg run time. If one is not specified " + "the percent change will always be displayed") + parser.add_argument('--no-summary', action='store_true', + help="Don't print the summary of the test run after " + " completes") + parser.add_argument('--color', action='store_true', + help="Print results with colors") + return parser.parse_args() + + +def trace(stdin, stdout, print_failures=False, failonly=False, + enable_diff=False, abbreviate=False, color=False, post_fails=False, + no_summary=False): + stream = subunit.ByteStreamToStreamResult( + stdin, non_subunit_name='stdout') + outcomes = testtools.StreamToDict( + functools.partial(show_outcome, stdout, + print_failures=print_failures, + failonly=failonly, + enable_diff=enable_diff, + abbreviate=abbreviate, + enable_color=color)) + summary = testtools.StreamSummary() + result = testtools.CopyStreamResult([outcomes, summary]) + result = testtools.StreamResultRouter(result) + cat = subunit.test_results.CatFiles(stdout) + result.add_rule(cat, 'test_id', test_id=None) + start_time = datetime.datetime.utcnow() + result.startTestRun() + try: + stream.run(result) + finally: + result.stopTestRun() + stop_time = datetime.datetime.utcnow() + elapsed_time = stop_time - start_time + + if count_tests('status', '.*') == 0: + print("The test run didn't actually run any tests") + return 1 + if post_fails: + print_fails(stdout) + if not no_summary: + print_summary(stdout, elapsed_time) + + # NOTE(mtreinish): Ideally this should live in testtools streamSummary + # this is just in place until the behavior lands there (if it ever does) + if count_tests('status', '^success$') == 0: + print("\nNo tests were successful during the run") + return 1 + return 0 if summary.wasSuccessful() else 1 + + +def main(): + args = parse_args() + exit(trace(sys.stdin, sys.stdout, args.print_failures, args.failonly, + args.enable_diff, args.abbreviate, args.color, args.post_fails, + args.no_summary)) + + +if __name__ == '__main__': + main() diff --git a/stestr/tests/sample_streams/all_skips.subunit b/stestr/tests/sample_streams/all_skips.subunit new file mode 100644 index 
0000000000000000000000000000000000000000..54156b5945c9fe6f598e9a788f470e5ead04d8e9 GIT binary patch literal 2453 zcmdn2&Eyah=Dp_J?d1^-4xYuSC7}h*If=!^3K}J;xdo}kC3=YknR+RSC5iC`Mfu68 z#l@L<>3StViFmMJaeiqLP*l$as3I854gtz(8YV2Bv{}2rVHyq07B5MyDA6y-NzBZ% zPR>Xy0y^5Zv?R@fttd6II6qG+I1wnAs8C#5P>^3#qL5jvP?TC&npu>Z@;m$Ixy{_MyYgT?zv7T3c5;z=P=IVxk!m*7C zW|1Fm(bsdlLE#og1y>-#>HO@OCqUr@j*aKHR$XuiCMlqjb4rU#Qj6k22{k7Xm?ZU_ zAtE7A0dNSmOyrjXh2R2^+i7MC`C+@>w)`F_Yzt`WK6t1bPrhTJ2nzLVkbPfgX)`!P zkQC~`sRR e6q=2VP|{EU literal 0 HcmV?d00001 diff --git a/stestr/tests/sample_streams/failure.subunit b/stestr/tests/sample_streams/failure.subunit new file mode 100644 index 0000000000000000000000000000000000000000..50709951b0236daa3ea04ce9c1f526cfbd89022c GIT binary patch literal 26667 zcmeHQ3z!tul}7Y4WRl$lbyb3d>m!*#nW3t>s~>|y8itu+6ajk%861Y9y1RO&q`RxA zhs^MZ731t*=;x0ctiGtFRoIw6_QrV z`tRQ{rhg|0o}nD>yFC=?^g?h!>Q%)|LKjS3(u9PpNpS@KwXI9)5lKX$Y!l^gQ?9Z@-JkQk22#OOJTXzy;>2Skp5O zn8Qzdz)Q|_3vo>n!CD15F|oOSs{`iaSJcYE3})a0lbvwJ6a*tv#r@9{+ai~hKQkGr z*AQUUNn%=vE1G7ihOA@+Nz)WMQ>9NAO%5ZZD=7K%1|r^) zcL90)_mBJ%fut#?nh!|i@?!;(fr-#prYIoX4gra52pI*?sbF+zRnZJ&8b~2hAikIz8SaCCrqhQDbbg-{mYaBWbWx2$mCp?N2knVJR7Xe%C465K}j$-M0Tj z4{5s~%sss1FhWo$o5go-j4bsGg3zrgrYfwGjUL$D1#rT~U3)HEjHp&qcGB6sBg@VO z^wZOB+lxR|%DLx(1Cg$g0-6>LnB^0uv50gH<=59$Gs65_P^({l;uD0bQP%u@p$P2b zmj7he?hR7^~x&Bw`$gpvM`MW3n&32qK8sqn0e{-lrFg^@tj%mPDal&&5gvR*& zH}2f#4OvHUKDp!UeJFSrHG(aZmq#utG9yw7776U)bjxpq-Q3X3xG@hlKr0YYu9UQt zsHzyr#ZqE?g`i2@Qm=391`WlxEcuQN9bOW5K=}R5mn=mPmr)QmtJ@=ugFqB!Si{gh zL?dJOZP-mS^ zRdRptI1g!`VL&K7T}_E{W=6b6)O5*cFpXsBWZg)BV~pGeQV~aZg5fB%W6w?_`$AJ)pOzb~?wbF@o z)BgNU?2($EJ>*QRw_0&c?A_nqFdrv&30+mY#1A9QgEkuKzv4BX@mkeBdg?&`Ifijqj+Co=em>`!2c^MWscVZ1t<8plVWDJq5B_ zQ7KIvAvpT!x^4HO0Ml6L_aH@yjhm$BA{lJza?h+JBk(k&pXB@K!)CZzZf z4gRK+p=Ao1YiH}Pn>zwLT##b*r#*&{7STQOt~1t0W*31}Dpb{>nv|04DE09#f8{Kk zk>!-=J?A~;1yM(6|LJe8|92F+joNBM;;Bfah|qQs1${+9Nks!Ep`=}vl&)Kj=qU0s zN^;hBYAPq`2+W_`*)bUfUO=hOY3^+(BJUt2&&Gg1Dv1dw3DNbD&j9d~RL5H(6!vmT zZc+No%E-9{-Mo@ri-OKa%Ki5)j$Bwu&?1RT&KfkSWx?~~QBKP_-rqh@5j96l|0U}V zFSEtWF1fZW!c?PJZ+dRof;L;sZ1-7eIbzz>95ML^ANsRhj+iSpm8H#9J+dL8m{T9x zWIAH5ntGrdF>PutF~4xb^FKu~FQJpZ{owi;r6#@2wpD7IkX7U948GRiKln6NW(Os< zV^X9NVvdmhYm}vbheFc4F1x1tse~edZf;| z|EW`URZh|o*uVMXwbN{Yv-iHgrDE#7*$+R|f#zXF6Ay}WD<|m)%nt|yzpw@Ff99lZ z6;tOPoIL(+Tj2g|l&PGgBe4JEFaP>4P+-f!>|?eR^9jSS2)eQSSN|laf2z?fxZ7oe zO9m<-R-!l)J!cKSjwm{HZTZ#`x{ukPp`&S$jD=XS%Y|x?tzZEzMLL-}h6x9Qyxv9X z>gtHHqN=J)->YD{WRRn%aUF??X2`qs+wU2bx_H@BtwRVg_>mX$I zyjhJG&ueXK7n)n=HFeQ1;+LHt_ztZZlW0>*QwLc~YHP7~g5Y>zK~$L28f|K9Ul3hF zf0a>c6S5x1x-)5S09^EG ztQ7OSdJzsN9XfaDH=#rnH#I1N#{0Rhty$6wHLhRg*JDt<)P& znF-1ImZkf8LQk;jp}N{_Vx86ISx+*GL13Z*N`lZLscTH$2I!~E$ zjcu`P9TV{LY|z>I)&wf$qKdkXC#nT}LgBE_@8ZLG#D{dUb!?bpc+N$9h?rvdsfj%7gU>+)H~Xk5ifl&WR@E9JxRzg$6He7q>24*fgh}w5^a;6!>s3Zyf zj@?vCQYwj=-Q-->&$1!LKcxH1GZ}uuCZ}F_l4G*Q_V@3rMk0>foj;#rJc@!#sIEHh~s9+AtLN#B>Nbpto z$tj%=x)44?rk?@??b5fzXxU|fN z+_Ev{ZsgWJ2yVg(z;ZhwZ0DSx;RbEzzH(XQJzs7+xAt1(;0rLE+hO?2cLU74a0){a7F6S!i35)NVa&3{M z86ur}LPspJnio(|~RsD2b7zq7g}q_fR>2Jc!Q) za#G|(wZv~|s9H>~o{-sMO3cKi!ry{ixd9BR$>Ewx8fFrvo`FAkb4U4e69~v1=R;<2Y-A1_5CqlnjSm0|TU{*f^f|aZJ!PE?9hA zkmZAHpz7|A_uqr1Gg{TJv>xRun$4Ziek&HuXcJ*}$4g_1_W2FBYG$1@L$yNCA$-$! 
z+;A`SY^k%Jq(!vvJ-E?J;H%Wghd7TAB`xNpN!sZZT;R+yk55EgREw2nGSs zMS75KvC^m0xvI;>Hq})xU03^`J1@A#YW>W5JZp~W@zM*|jYZRwsCCTPcWWei^g$KX zr>ZHqXDQabg?3Q!!u%)AaxWG-t<(`(65+B*ZnV{%Ra+nY1lJAiwa;GsY{y72aX|UA zSDX#)=Xp>+Ub)L-eza?A!)fDS(dI!Nczw%AFmXWf+rE2d1A?+T0T%rIwn)p-GliYG zR_=|roOEkO;weeYz{LwXjhNJcW>4P&6x`}I`uU789?*0|=3i&legj3ePC|dR_E50{v|(x- zrr=6R5Di1q7hm(%5|r9HmwV&U0dL5<1n2iXOi!O_H)Qt6JG+WC77bJI!KNZ$$0bEE zZ$kK6h{9@K`fclx5#Zs1^qt9<(*8x-1Go2qw+D*!td%Z!K!~4f8=caynszeN9=QK_ zxp{qs^c+!r6JGlRw8G@O>7RJJrbOR6v{utAuV5To!`6wN~dB7 z!bsbt^Pq*L6<*d=mHz5t8Js;t(Qw)VCoCC^A~L#2Z%T+MGH1@b;ShV9Xy5v4I3{bD&#zMQq$;DUg6Knczi{^@W&eq>VLjo-Yw%!?9S5NhWPAre&*` zDrXXqYL&x=b&*9jTu6+wF)&Fc4VnQ+YuPKpp-e%BI8O94p;#gTyNYQ7G%yo5c!wh< zjmTvwl5^2S9zH&v@bjTCFY$rAbz18>xdK>~yR_Y8kqyg;Tr80A$Cv)J?he!jHxU-S-hsJf#c3V zfvb)zfxBSf!Fvn>9Ta%s6Xyinu%r{{nuNU8#B(zu8+8Y-ev#e_qV3(DMI9%Mv#yKO zhQIOKiSJ>oWIa1%dCJkU)M#0%c$xkvQyDEw{mGZ57M*lg z3kFrMAAc`>oIIKo!;Au2?8V#OCgD2}aeDq8&e@ZiYrdO(!qcBGNe|@I+ zjmw*K_D2WyOlcoqIZ68nGVsRknNbYvE&}5Go*!P)8#=l#v9!2&dtt-j-G@+2Y;g?q z-~2*P*_3SQg272OaDJITAbszX!t+b3A*kfV%lsYn{5?V;CMK+B%pTgiem4re0)>9| z;Xf}tfvuQw$yT{53JLB}T?l~QrG_4-cz>9C(R2V{+Mbws= zz4(-Bs7vR1K%e>1^Q2g_W@+;%)PL8eaWA6OcW#NhX&#U ztO75+amRp{#BB`xmC0KVvS<1 literal 0 HcmV?d00001 diff --git a/stestr/tests/sample_streams/successful.subunit b/stestr/tests/sample_streams/successful.subunit new file mode 100644 index 0000000000000000000000000000000000000000..6f3b6641252484c5f40d630d4a28ac08208ba2bf GIT binary patch literal 12563 zcmeI2d2ka|9LEczh!vb7j>s?!3uyhLq%s4ihwX2QcgvIVNkBY0$QLj98-<~kz?c%6@9;#G>cA$)nwC4GSf_w?*4x7 z_xt?5@BQ*#DkECc$)j3_XPyE5lizct$thTtguB&Z_&+g`~YVjxKdv1p(?I^J=(}h4l7B6)~Pa+ z%f?ZX%14UhsWGO!Ec|40xEW`@o0Apf;@m7XICW5bN@`NN9Su!NO~?wR{`SDvOKdJ% zMA;G&6EdkcD3eKKlspQqMQJuPB*~T#Z%?z0Cja6EQw}FPh1Xe$vL|4ZNki%}C`zoN zjIX=F^Nx&6RvjwODdaT_u%3Hmw@`?)TX(bfExAL?aXiSW7XY~vW8oM#xyvc=PF0dv z-Z`GiNVBCR*wX89tMUrxHn|witxBxNo5b;oi7j%v)f_e})RW^l3g=yr;wVPSRSQ^N z!Rhk&ilcZzp>PS{;pb3=98TeV*VI)<=!q{fUdH7?yMtUFf~tg^94L;cjF?W_1NPOb zZJ{9Lvy!QA;Vk=sN-UIN#OC7lWidWc{9B=r;@EUPuPw-=JF)ZIwoE_m=!<0ILy>p| zkELE$hYc>BtBRtcz*F*8TyRvW*G=&Tu=)HWHR*j`zpx97N93hByV48e-uQL z1Qt()NJX*#RV3bN@6P3@aGy#}z0zr|zlQW>vf-gjnt)8a+;v6by)N`dOz{9vNj|Bx zpcM$nWXq=u?t)Cx19he8Wa+RCyFn+ziOJlE%mKPOvEtsuF4&-B@$_WS;vk+WV9Qs> z#6v0sVoW;naUjNY^%62;)TZeTK>grZOsdB_MAf3~3r82>^7;+c@<9+3Xz&@I2< z;QuIOn2vhx9JmptqvI`wvZlzL+p=mjMamIn1{E+?nuav7Pe2t_@$=Es54cH zR-O~D8#To=T0r`W9C?@}t(IpJYVje1A`2#3NRQ`HWD5vcQ{?4G!{>k^=aR{0N?0ac zY{(jk*FL`#IYQuBd@FtkGrBGWX>BDVD>+osWJad1k&3Ezh0Ss7Q0z;Jvch2bS<>Y4 zeqYrxw#iVnyT}-4w=1L9`CGO4G!BtQu0gvx+|;2pKZI06kz61_lqVi-jzQ>aVRYKz z;uanz$cDWz?KH^d`{qnjQ`kFgug%sJHn+DC#o3@u4Q|p<_T_GewbJ_}a+oeisxrhfEktqr>PK0k2^Jc3rS4Jz*)^3dOXA z3~NmnYttB(z6!3)`v@ObG_!$2T_5VRdlA(2;--~xtx|t~Dy9l5^;JB;?3x<7f=;Qg zQuUAaJlmG|NYC@j49~{p%C9O)fZG%Z|6(u89Yd|_*Uh{gsjC~yKj2@+b=?E_^MVq; zk7Cr%`%hg1idjJM#P&`Vbh3XG>keqW#|Y${w!BwaEhKR=1aax$loBJ2>od&ybAS6r zat{cZj0NKWVQw&@_Yv&#W9iv4AeapVPm0s3=;8hm{HG4RCqWy+CoF?3PJ=Kmx0&>; z!S?kz-rO<||JI>Q4H+^WEuN?Mop;(0MoxB$nhEYF(euxAS`^H=2.0 # BSD subunit2sql>=1.8.0 coverage>=4.0 # Apache-2.0 reno>=1.8.0 # Apache-2.0 +ddt>=1.0.1 # MIT From 161644f1ee642bb5e8d17b147749ec05aa4b9c42 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 26 Jul 2017 15:34:48 -0400 Subject: [PATCH 2/5] Switch the default output filter to subunit-trace This commit switches the default output filter to be subunit trace. This is configurable and if people desire they can either use the old output format, or straight subunit. But, the subunit-trace output is a lot more useful by default. 
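For illustration, the output mode is now selected per invocation roughly as
follows (this is a sketch, not an exhaustive list; the flag names are the
ones added by this series or already present on stestr run):

    # render results through subunit-trace (the new default)
    stestr run
    # keep the previous summary-style output
    stestr run --no-subunit-trace
    # emit the raw subunit v2 stream instead of a rendered report
    stestr run --subunit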
--- stestr/commands/load.py | 26 ++++++++++++++++++++++++-- stestr/commands/run.py | 24 +++++++++++++++++------- 2 files changed, 41 insertions(+), 9 deletions(-) diff --git a/stestr/commands/load.py b/stestr/commands/load.py index 2eef631f..f91c84cd 100644 --- a/stestr/commands/load.py +++ b/stestr/commands/load.py @@ -13,6 +13,7 @@ """Load data into a repository.""" +import datetime import functools import sys @@ -23,6 +24,7 @@ from stestr.repository import abstract as repository from stestr.repository import util from stestr import results +from stestr import subunit_trace from stestr import utils @@ -40,6 +42,9 @@ def set_cli_opts(parser): parser.add_argument("--id", "-i", default=None, help="Append the stream into an existing entry in the " "repository") + parser.add_argument("--subunit-trace", action='store_true', default=False, + help="Display the loaded stream through the " + "subunit-trace output filter") def get_cli_help(): @@ -58,12 +63,13 @@ def run(arguments): args = arguments[0] load(repo_type=args.repo_type, repo_url=args.repo_url, partial=args.partial, subunit_out=args.subunit, - force_init=args.force_init, streams=arguments[1]) + force_init=args.force_init, streams=arguments[1], + pretty_out=args.subunit_trace) def load(force_init=False, in_streams=None, partial=False, subunit_out=False, repo_type='file', repo_url=None, - run_id=None, streams=None): + run_id=None, streams=None, pretty_out=False): """Load subunit streams into a repository :param bool force_init: Initialize the specifiedrepository if it hasn't @@ -77,6 +83,8 @@ def load(force_init=False, in_streams=None, :param str repo_url: The url of the repository to use. :param run_id: The optional run id to save the subunit stream to. :param list streams: A list of file paths to read for the input streams. + :param bool pretty_out: Use the subunit-trace output filter for the loaded + stream. 
""" try: @@ -116,6 +124,14 @@ def make_tests(): inserter = repo.get_inserter(partial=partial, run_id=run_id) if subunit_out: output_result, summary_result = output.make_result(inserter.get_id) + if pretty_out: + outcomes = testtools.StreamToDict( + functools.partial(subunit_trace.show_outcome, sys.stdout)) + summary_result = testtools.StreamSummary() + output_result = testtools.CopyStreamResult([outcomes, summary_result]) + output_result = testtools.StreamResultRouter(output_result) + cat = subunit.test_results.CatFiles(sys.stdout) + output_result.add_rule(cat, 'test_id', test_id=None) else: try: previous_run = repo.get_latest_run() @@ -125,11 +141,17 @@ def make_tests(): inserter.get_id, sys.stdout, previous_run) summary_result = output_result.get_summary() result = testtools.CopyStreamResult([inserter, output_result]) + start_time = datetime.datetime.utcnow() result.startTestRun() try: case.run(result) finally: result.stopTestRun() + stop_time = datetime.datetime.utcnow() + elapsed_time = stop_time - start_time + if pretty_out: + subunit_trace.print_fails(sys.stdout) + subunit_trace.print_summary(sys.stdout, elapsed_time) if not summary_result.wasSuccessful(): return 1 else: diff --git a/stestr/commands/run.py b/stestr/commands/run.py index 3ee98ba9..4531c7d7 100644 --- a/stestr/commands/run.py +++ b/stestr/commands/run.py @@ -86,6 +86,9 @@ def set_cli_opts(parser): parser.add_argument('--combine', action='store_true', default=False, help="Combine the results from the test run with the " "last run in the repository") + parser.add_argument('--no-subunit-trace', action='store_true', + default=False, + help='Disable the default subunit-trace output filter') def get_cli_help(): @@ -117,7 +120,8 @@ def run_command(config='.stestr.conf', repo_type='file', partial=False, subunit_out=False, until_failure=False, analyze_isolation=False, isolated=False, worker_path=None, blacklist_file=None, whitelist_file=None, black_regex=None, - no_discover=False, random=False, combine=False, filters=None): + no_discover=False, random=False, combine=False, filters=None, + pretty_out=True): """Function to execute the run command This function implements the run command. It will run the tests specified @@ -168,6 +172,7 @@ def run_command(config='.stestr.conf', repo_type='file', :param list filters: A list of string regex filters to initially apply on the test list. Tests that match any of the regexes will be used. (assuming any other filtering specified also uses it) + :param bool pretty_out: Use the subunit-trace output filter """ try: repo = util.get_repo_open(repo_type, repo_url) @@ -198,7 +203,8 @@ def run_tests(): return load.load(in_streams=run_proc, partial=partial, subunit_out=subunit_out, repo_type=repo_type, - repo_url=repo_url, run_id=combine_id) + repo_url=repo_url, run_id=combine_id, + pretty_out=pretty_out) if not until_failure: return run_tests() @@ -256,7 +262,8 @@ def run_tests(): subunit_out=subunit_out, combine_id=combine_id, repo_type=repo_type, - repo_url=repo_url) + repo_url=repo_url, + pretty_out=pretty_out) if run_result > result: result = run_result return result @@ -266,7 +273,8 @@ def run_tests(): subunit_out=subunit_out, combine_id=combine_id, repo_type=repo_type, - repo_url=repo_url) + repo_url=repo_url, + pretty_out=pretty_out) else: # Where do we source data about the cause of conflicts. 
# XXX: Should instead capture the run id in with the failing test @@ -411,7 +419,7 @@ def map_test(test_dict): def _run_tests(cmd, failing, analyze_isolation, isolated, until_failure, subunit_out=False, combine_id=None, repo_type='file', - repo_url=None): + repo_url=None, pretty_out=True): """Run the tests cmd was parameterised with.""" cmd.setUp() try: @@ -428,7 +436,8 @@ def run_tests(): return load.load((None, None), in_streams=run_procs, partial=partial, subunit_out=subunit_out, repo_type=repo_type, - repo_url=repo_url, run_id=combine_id) + repo_url=repo_url, run_id=combine_id, + pretty_out=pretty_out) if not until_failure: return run_tests() @@ -444,6 +453,7 @@ def run_tests(): def run(arguments): filters = arguments[1] or None args = arguments[0] + pretty_out = not args.no_subunit_trace return run_command( config=args.config, repo_type=args.repo_type, repo_url=args.repo_url, @@ -456,4 +466,4 @@ def run(arguments): worker_path=args.worker_path, blacklist_file=args.blacklist_file, whitelist_file=args.whitelist_file, black_regex=args.black_regex, no_discover=args.no_discover, random=args.random, combine=args.combine, - filters=filters) + filters=filters, pretty_out=pretty_out) From df4960ab095cc0a8484b1f08438ff9ab01f3cd8e Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 27 Jul 2017 12:32:29 -0400 Subject: [PATCH 3/5] Add support for setting colorized output We had to port the colorizer output modules when we migrated subunit-trace from os-testr. This commit puts it to use by adding cli options to stestr run and stestr load, the only 2 commands which currently use subunit-trace, to enable colorized output. --- stestr/commands/load.py | 12 +++++++++--- stestr/commands/run.py | 23 ++++++++++++++++------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/stestr/commands/load.py b/stestr/commands/load.py index f91c84cd..7a786a4d 100644 --- a/stestr/commands/load.py +++ b/stestr/commands/load.py @@ -45,6 +45,10 @@ def set_cli_opts(parser): parser.add_argument("--subunit-trace", action='store_true', default=False, help="Display the loaded stream through the " "subunit-trace output filter") + parser.add_argument('--color', action='store_true', default=False, + help='Enable color output in the subunit-trace output,' + ' if subunit-trace output is enabled. If ' + 'subunit-trace is disable this does nothing.') def get_cli_help(): @@ -64,12 +68,12 @@ def run(arguments): load(repo_type=args.repo_type, repo_url=args.repo_url, partial=args.partial, subunit_out=args.subunit, force_init=args.force_init, streams=arguments[1], - pretty_out=args.subunit_trace) + pretty_out=args.subunit_trace, color=args.color) def load(force_init=False, in_streams=None, partial=False, subunit_out=False, repo_type='file', repo_url=None, - run_id=None, streams=None, pretty_out=False): + run_id=None, streams=None, pretty_out=False, color=False): """Load subunit streams into a repository :param bool force_init: Initialize the specifiedrepository if it hasn't @@ -85,6 +89,7 @@ def load(force_init=False, in_streams=None, :param list streams: A list of file paths to read for the input streams. :param bool pretty_out: Use the subunit-trace output filter for the loaded stream. 
+ :param bool color: Enabled colorized subunit-trace output """ try: @@ -126,7 +131,8 @@ def make_tests(): output_result, summary_result = output.make_result(inserter.get_id) if pretty_out: outcomes = testtools.StreamToDict( - functools.partial(subunit_trace.show_outcome, sys.stdout)) + functools.partial(subunit_trace.show_outcome, sys.stdout, + enable_color=color)) summary_result = testtools.StreamSummary() output_result = testtools.CopyStreamResult([outcomes, summary_result]) output_result = testtools.StreamResultRouter(output_result) diff --git a/stestr/commands/run.py b/stestr/commands/run.py index 4531c7d7..d10cdb46 100644 --- a/stestr/commands/run.py +++ b/stestr/commands/run.py @@ -89,6 +89,11 @@ def set_cli_opts(parser): parser.add_argument('--no-subunit-trace', action='store_true', default=False, help='Disable the default subunit-trace output filter') + parser.add_argument('--color', action='store_true', default=False, + help='Enable color output in the subunit-trace output,' + ' if subunit-trace output is enabled. (this is ' + 'the default). If subunit-trace is disable this ' + ' does nothing.') def get_cli_help(): @@ -121,7 +126,7 @@ def run_command(config='.stestr.conf', repo_type='file', analyze_isolation=False, isolated=False, worker_path=None, blacklist_file=None, whitelist_file=None, black_regex=None, no_discover=False, random=False, combine=False, filters=None, - pretty_out=True): + pretty_out=True, color=False): """Function to execute the run command This function implements the run command. It will run the tests specified @@ -173,6 +178,7 @@ def run_command(config='.stestr.conf', repo_type='file', the test list. Tests that match any of the regexes will be used. (assuming any other filtering specified also uses it) :param bool pretty_out: Use the subunit-trace output filter + :param bool color: Enable colorized output in subunit-trace """ try: repo = util.get_repo_open(repo_type, repo_url) @@ -204,7 +210,8 @@ def run_tests(): partial=partial, subunit_out=subunit_out, repo_type=repo_type, repo_url=repo_url, run_id=combine_id, - pretty_out=pretty_out) + pretty_out=pretty_out, + color=color) if not until_failure: return run_tests() @@ -263,7 +270,8 @@ def run_tests(): combine_id=combine_id, repo_type=repo_type, repo_url=repo_url, - pretty_out=pretty_out) + pretty_out=pretty_out, + color=color) if run_result > result: result = run_result return result @@ -274,7 +282,8 @@ def run_tests(): combine_id=combine_id, repo_type=repo_type, repo_url=repo_url, - pretty_out=pretty_out) + pretty_out=pretty_out, + color=color) else: # Where do we source data about the cause of conflicts. 
# XXX: Should instead capture the run id in with the failing test @@ -419,7 +428,7 @@ def map_test(test_dict): def _run_tests(cmd, failing, analyze_isolation, isolated, until_failure, subunit_out=False, combine_id=None, repo_type='file', - repo_url=None, pretty_out=True): + repo_url=None, pretty_out=True, color=False): """Run the tests cmd was parameterised with.""" cmd.setUp() try: @@ -437,7 +446,7 @@ def run_tests(): partial=partial, subunit_out=subunit_out, repo_type=repo_type, repo_url=repo_url, run_id=combine_id, - pretty_out=pretty_out) + pretty_out=pretty_out, color=color) if not until_failure: return run_tests() @@ -466,4 +475,4 @@ def run(arguments): worker_path=args.worker_path, blacklist_file=args.blacklist_file, whitelist_file=args.whitelist_file, black_regex=args.black_regex, no_discover=args.no_discover, random=args.random, combine=args.combine, - filters=filters, pretty_out=pretty_out) + filters=filters, pretty_out=pretty_out, color=args.color) From 735f410b4f5ab3e6fba2fe1b3bc4391432f3dfde Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 27 Jul 2017 12:56:45 -0400 Subject: [PATCH 4/5] Add subunit-trace support to stestr last This commit adds the subunit-trace output filter to the stestr last command. This is enabled by default, the last command is used to see the results of the last command and the subunit-trace filter makes viewing test results much easier. --- stestr/commands/last.py | 48 ++++++++++++++++++++++--------- stestr/tests/test_return_codes.py | 6 ++-- 2 files changed, 38 insertions(+), 16 deletions(-) diff --git a/stestr/commands/last.py b/stestr/commands/last.py index d813405d..f92cf97a 100644 --- a/stestr/commands/last.py +++ b/stestr/commands/last.py @@ -17,6 +17,7 @@ from stestr import output from stestr.repository import util from stestr import results +from stestr import subunit_trace def get_cli_help(): @@ -35,16 +36,28 @@ def get_cli_help(): def set_cli_opts(parser): parser.add_argument( "--subunit", action="store_true", - default=False, help="Show output as a subunit stream."), + default=False, help="Show output as a subunit stream.") + parser.add_argument("--no-subunit-trace", action='store_true', + default=False, + help="Disable output with the subunit-trace output " + "filter") + parser.add_argument('--color', action='store_true', default=False, + help='Enable color output in the subunit-trace output,' + ' if subunit-trace output is enabled. (this is ' + 'the default). If subunit-trace is disable this ' + ' does nothing.') def run(arguments): args = arguments[0] + pretty_out = not args.no_subunit_trace return last(repo_type=args.repo_type, repo_url=args.repo_url, - subunit=args.subunit) + subunit_out=args.subunit, pretty_out=pretty_out, + color=args.color) -def last(repo_type='file', repo_url=None, subunit=False): +def last(repo_type='file', repo_url=None, subunit_out=False, pretty_out=True, + color=False): """Show the last run loaded into a a repository Note this function depends on the cwd for the repository if `repo_type` is @@ -54,11 +67,13 @@ def last(repo_type='file', repo_url=None, subunit=False): :param str repo_type: This is the type of repository to use. Valid choices are 'file' and 'sql'. :param str repo_url: The url of the repository to use. - :param bool subunit: Show output as a subunit stream. + :param bool subunit_out: Show output as a subunit stream. + :param pretty_out: Use the subunit-trace output filter. + :param color: Enable colorized output with the subunit-trace output filter. 
""" repo = util.get_repo_open(repo_type, repo_url) latest_run = repo.get_latest_run() - if subunit: + if subunit_out: stream = latest_run.get_subunit_stream() output.output_stream(stream) # Exits 0 if we successfully wrote the stream. @@ -74,15 +89,20 @@ def last(repo_type='file', repo_url=None, subunit=False): except KeyError: previous_run = None failed = False - output_result = results.CLITestResult(latest_run.get_id, sys.stdout, - previous_run) - summary = output_result.get_summary() - output_result.startTestRun() - try: - case.run(output_result) - finally: - output_result.stopTestRun() - failed = not summary.wasSuccessful() + if not pretty_out: + output_result = results.CLITestResult(latest_run.get_id, sys.stdout, + previous_run) + summary = output_result.get_summary() + output_result.startTestRun() + try: + case.run(output_result) + finally: + output_result.stopTestRun() + failed = not summary.wasSuccessful() + else: + stream = latest_run.get_subunit_stream() + failed = subunit_trace.trace(stream, sys.stdout, post_fails=True, + color=color) if failed: return 1 else: diff --git a/stestr/tests/test_return_codes.py b/stestr/tests/test_return_codes.py index 3d718e76..08dc82cb 100644 --- a/stestr/tests/test_return_codes.py +++ b/stestr/tests/test_return_codes.py @@ -108,7 +108,8 @@ def _get_cmd_stdout(self, cmd): def test_combine_results(self): self.assertRunExit('stestr run passing', 0) - stdout = self._get_cmd_stdout('stestr last') + stdout = self._get_cmd_stdout( + 'stestr last --no-subunit-trace') stdout = six.text_type(stdout[0]) test_count_split = stdout.split(' ') test_count = test_count_split[1] @@ -116,7 +117,8 @@ def test_combine_results(self): id_regex = re.compile('\(id=(.*?)\)') test_id = id_regex.search(stdout).group(0) self.assertRunExit('stestr run --combine passing', 0) - combine_stdout = self._get_cmd_stdout('stestr last')[0] + combine_stdout = self._get_cmd_stdout( + 'stestr last --no-subunit-trace')[0] combine_stdout = six.text_type(combine_stdout) combine_test_count_split = combine_stdout.split(' ') combine_test_count = combine_test_count_split[1] From 609c0a9aafdb57c5a0afa9e537b9404dee068af8 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 27 Jul 2017 13:06:11 -0400 Subject: [PATCH 5/5] Add docs for output filters This commit adds docs for subunit-trace and other output filters, as well as providing an end user guide to the stestr manual. --- doc/source/MANUAL.rst | 18 ++++++++++++++++++ doc/source/subunit_trace.rst | 7 +++++++ 2 files changed, 25 insertions(+) create mode 100644 doc/source/subunit_trace.rst diff --git a/doc/source/MANUAL.rst b/doc/source/MANUAL.rst index e952a367..4e506f99 100644 --- a/doc/source/MANUAL.rst +++ b/doc/source/MANUAL.rst @@ -146,6 +146,24 @@ selection options to do this, for example:: This will list all the tests which will be run by stestr using that combination of arguments. +Adjusting test run output +------------------------- + +By default the ``stestr run`` command uses an output filter called +subunit-trace. (as does the ``stestr last`` command) This displays the tests +as they are finished executing, as well as their worker and status. It also +prints aggregate numbers about the run at the end. You can read more about +subunit-trace in the module doc: :ref:`subunit_trace`. + +However, the test run output is configurable, you can disable this output +with the ``--no-subunit-trace`` flag which will be completely silent except for +any failures it encounters. 
There is also the ``--color`` flag which will enable +colorization of the subunit-trace output. If you prefer to consume the raw +subunit stream yourself and run your own output rendering or filtering, you can +use the ``--subunit`` flag to output the result stream as raw subunit v2. + + + Combining Test Results ---------------------- There is sometimes a use case for running a single test suite split between diff --git a/doc/source/subunit_trace.rst b/doc/source/subunit_trace.rst new file mode 100644 index 00000000..c00cc8a0 --- /dev/null +++ b/doc/source/subunit_trace.rst @@ -0,0 +1,7 @@ +.. _subunit_trace: + +Subunit Trace +============= + +.. automodule:: stestr.subunit_trace + :members:
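Taken together, the series leaves the result-viewing commands looking
roughly like this (a sketch of typical invocations using the flags added
above; the stream file name is only illustrative):

    # replay the most recent run through subunit-trace, with color
    stestr last --color
    # fall back to the previous summary-style output
    stestr last --no-subunit-trace
    # load a previously captured stream and render it with subunit-trace
    stestr load --subunit-trace --color captured-run.subunit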