Skip to content

Commit

Permalink
[lit] Add --show-xxx command line options
Browse files Browse the repository at this point in the history
Provide `--show-xxx` flags for all non-failure result codes, just as we
already do for `--show-xfail` and `--show-unsupported`.

Reviewed By: jdenny

Differential Revision: https://reviews.llvm.org/D82233
  • Loading branch information
Julian Lettner committed Jul 9, 2020
1 parent 2308487 commit f06d242
Show file tree
Hide file tree
Showing 8 changed files with 48 additions and 15 deletions.
24 changes: 12 additions & 12 deletions llvm/utils/lit/lit/cl_arguments.py
Expand Up @@ -65,12 +65,18 @@ def parse_args():
dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false")
format_group.add_argument("--show-unsupported",
help="Show unsupported tests",
action="store_true")
format_group.add_argument("--show-xfail",
help="Show tests that were expected to fail",
action="store_true")

# Note: this does not generate flags for user-defined result codes.
success_codes = [c for c in lit.Test.ResultCode.all_codes()
if not c.isFailure]
for code in success_codes:
format_group.add_argument(
"--show-{}".format(code.name.lower()),
dest="shown_codes",
help="Show {} tests ({})".format(code.label.lower(), code.name),
action="append_const",
const=code,
default=[])

execution_group = parser.add_argument_group("Test Execution")
execution_group.add_argument("--path",
Expand Down Expand Up @@ -187,12 +193,6 @@ def parse_args():
else:
opts.shard = None

opts.show_results = set()
if opts.show_unsupported:
opts.show_results.add(lit.Test.UNSUPPORTED)
if opts.show_xfail:
opts.show_results.add(lit.Test.XFAIL)

opts.reports = filter(None, [opts.output, opts.xunit_xml_output])

return opts
Expand Down
6 changes: 3 additions & 3 deletions llvm/utils/lit/lit/main.py
Expand Up @@ -265,15 +265,15 @@ def print_results(tests, elapsed, opts):
tests_by_code[test.result.code].append(test)

for code in lit.Test.ResultCode.all_codes():
print_group(tests_by_code[code], code, opts.show_results)
print_group(tests_by_code[code], code, opts.shown_codes)

print_summary(tests_by_code, opts.quiet, elapsed)


def print_group(tests, code, show_results):
def print_group(tests, code, shown_codes):
if not tests:
return
if not code.isFailure and code not in show_results:
if not code.isFailure and code not in shown_codes:
return
print('*' * 20)
print('{} Tests ({}):'.format(code.label, len(tests)))
Expand Down
1 change: 1 addition & 0 deletions llvm/utils/lit/tests/Inputs/show-result-codes/fail.txt
@@ -0,0 +1 @@
RUN: false
6 changes: 6 additions & 0 deletions llvm/utils/lit/tests/Inputs/show-result-codes/lit.cfg
@@ -0,0 +1,6 @@
# Minimal lit suite configuration for the show-result-codes inputs.
# The directory contains one test per interesting result code
# (pass, fail, unsupported, xfail) so the driver test can check
# which result groups lit prints under the various --show-* flags.
import lit.formats
# Suite name as it appears in lit's output.
config.name = 'show-result-codes'
# Every *.txt file in this directory is a test.
config.suffixes = ['.txt']
# ShTest: execute each test's "RUN:" lines through the shell.
config.test_format = lit.formats.ShTest()
# None lets lit fill in defaults for both roots — presumably the
# directory containing this lit.cfg; tests run in place.
config.test_source_root = None
config.test_exec_root = None
1 change: 1 addition & 0 deletions llvm/utils/lit/tests/Inputs/show-result-codes/pass.txt
@@ -0,0 +1 @@
RUN: true
2 changes: 2 additions & 0 deletions llvm/utils/lit/tests/Inputs/show-result-codes/unsupported.txt
@@ -0,0 +1,2 @@
REQUIRES: missing-feature
RUN: true
2 changes: 2 additions & 0 deletions llvm/utils/lit/tests/Inputs/show-result-codes/xfail.txt
@@ -0,0 +1,2 @@
XFAIL: *
RUN: false
21 changes: 21 additions & 0 deletions llvm/utils/lit/tests/show-result-codes.py
@@ -0,0 +1,21 @@
# Test the --show-<result-code> {pass,unsupported,xfail,...} options.
#
# Each invocation runs the same four-test input suite (one passing, one
# failing, one unsupported, one expectedly-failing test); only the set of
# result groups printed in the summary should change with the flags.
# `not` is needed because the suite always contains a failing test, so
# lit itself exits non-zero.
#
# RUN: not %{lit} %{inputs}/show-result-codes | FileCheck %s --check-prefix=NONE
# RUN: not %{lit} %{inputs}/show-result-codes --show-unsupported | FileCheck %s --check-prefix=ONE
# RUN: not %{lit} %{inputs}/show-result-codes --show-pass --show-xfail | FileCheck %s --check-prefix=MULTIPLE

# Failing tests are always shown
# NONE-NOT: Unsupported Tests (1)
# NONE-NOT: Passed Tests (1)
# NONE-NOT: Expectedly Failed Tests (1)
# NONE: Failed Tests (1)

# A single --show-* flag adds exactly that one group to the output.
# ONE: Unsupported Tests (1)
# ONE-NOT: Passed Tests (1)
# ONE-NOT: Expectedly Failed Tests (1)
# ONE: Failed Tests (1)

# Multiple --show-* flags can be combined.
# MULTIPLE-NOT: Unsupported Tests (1)
# MULTIPLE: Passed Tests (1)
# MULTIPLE: Expectedly Failed Tests (1)
# MULTIPLE: Failed Tests (1)


0 comments on commit f06d242

Please sign in to comment.