diff --git a/compiler-rt/test/lit.common.cfg.py b/compiler-rt/test/lit.common.cfg.py index 9d2f02189b8bd..5a4e60ea34a8c 100644 --- a/compiler-rt/test/lit.common.cfg.py +++ b/compiler-rt/test/lit.common.cfg.py @@ -196,12 +196,12 @@ def push_dynamic_library_lookup_path(config, new_path): if test_cc_resource_dir is not None: test_cc_resource_dir = os.path.realpath(test_cc_resource_dir) if lit_config.debug: - lit_config.note(f"Resource dir for {config.clang} is {test_cc_resource_dir}") + lit_config.dbg(f"Resource dir for {config.clang} is {test_cc_resource_dir}") local_build_resource_dir = os.path.realpath(config.compiler_rt_output_dir) if test_cc_resource_dir != local_build_resource_dir and config.test_standalone_build_libs: if config.compiler_id == "Clang": if lit_config.debug: - lit_config.note( + lit_config.dbg( f"Overriding test compiler resource dir to use " f'libraries in "{config.compiler_rt_libdir}"' ) diff --git a/libcxx/test/selftest/dsl/dsl.sh.py b/libcxx/test/selftest/dsl/dsl.sh.py index 93f351f58eb4b..b8ee2ca3d6bb9 100644 --- a/libcxx/test/selftest/dsl/dsl.sh.py +++ b/libcxx/test/selftest/dsl/dsl.sh.py @@ -61,7 +61,7 @@ def setUp(self): self.litConfig = lit.LitConfig.LitConfig( progname="lit", path=[], - quiet=False, + diagnostic_level="note", useValgrind=False, valgrindLeakCheck=False, valgrindArgs=[], diff --git a/libcxx/utils/libcxx/test/config.py b/libcxx/utils/libcxx/test/config.py index 0840c46d7bfae..ea80134d064bf 100644 --- a/libcxx/utils/libcxx/test/config.py +++ b/libcxx/utils/libcxx/test/config.py @@ -22,6 +22,7 @@ def _appendToSubstitution(substitutions, key, value): def configure(parameters, features, config, lit_config): note = lambda s: lit_config.note("({}) {}".format(config.name, s)) + debug = lambda s: lit_config.dbg("({}) {}".format(config.name, s)) config.environment = dict(os.environ) # Apply the actions supplied by parameters to the configuration first, since @@ -32,7 +33,7 @@ def configure(parameters, features, config, lit_config): 
for action in actions: action.applyTo(config) if lit_config.debug: - note( + debug( "Applied '{}' as a result of parameter '{}'".format( action.pretty(config, lit_config.params), param.pretty(config, lit_config.params), @@ -45,7 +46,7 @@ def configure(parameters, features, config, lit_config): for action in actions: action.applyTo(config) if lit_config.debug: - note( + debug( "Applied '{}' as a result of implicitly detected feature '{}'".format( action.pretty(config, lit_config.params), feature.pretty(config) ) diff --git a/libcxx/utils/libcxx/test/dsl.py b/libcxx/utils/libcxx/test/dsl.py index 3fb30d82e0d24..88fc49160c56b 100644 --- a/libcxx/utils/libcxx/test/dsl.py +++ b/libcxx/utils/libcxx/test/dsl.py @@ -88,7 +88,7 @@ def _executeWithFakeConfig(test, commands): litConfig = lit.LitConfig.LitConfig( progname="lit", path=[], - quiet=False, + diagnostic_level="note", useValgrind=False, valgrindLeakCheck=False, valgrindArgs=[], diff --git a/llvm/utils/lit/lit/LitConfig.py b/llvm/utils/lit/lit/LitConfig.py index 8cef3c1fd8569..71dad85bbaddd 100644 --- a/llvm/utils/lit/lit/LitConfig.py +++ b/llvm/utils/lit/lit/LitConfig.py @@ -1,6 +1,7 @@ from __future__ import absolute_import import inspect import os +import enum import platform import sys @@ -25,7 +26,7 @@ def __init__( self, progname, path, - quiet, + diagnostic_level, useValgrind, valgrindLeakCheck, valgrindArgs, @@ -46,7 +47,7 @@ def __init__( self.progname = progname # The items to add to the PATH environment variable. 
self.path = [str(p) for p in path] - self.quiet = bool(quiet) + self.diagnostic_level = diagnostic_level self.useValgrind = bool(useValgrind) self.valgrindLeakCheck = bool(valgrindLeakCheck) self.valgrindUserArgs = list(valgrindArgs) @@ -155,8 +156,7 @@ def per_test_coverage(self, value): def load_config(self, config, path): """load_config(config, path) - Load a config object from an alternate path.""" - if self.debug: - self.note("load_config from %r" % path) + self.dbg("load_config from %r" % path) config.load_from_path(path, self) return config @@ -209,6 +209,8 @@ def getToolsPath(self, dir, paths, tools): return dir def _write_message(self, kind, message): + if not self.diagnostic_level_enabled(kind): + return # Get the file/line where this message was generated. f = inspect.currentframe() # Step out of _write_message, and then out of wrapper. @@ -234,13 +236,21 @@ def substitute(self, string): "unable to find %r parameter, use '--param=%s=VALUE'" % (key, key) ) + def diagnostic_level_enabled(self, kind): + if kind == "debug": + return self.debug + return DiagnosticLevel.create(self.diagnostic_level) >= DiagnosticLevel.create( + kind + ) + + def dbg(self, message): + self._write_message("debug", message) + def note(self, message): - if not self.quiet: - self._write_message("note", message) + self._write_message("note", message) def warning(self, message): - if not self.quiet: - self._write_message("warning", message) + self._write_message("warning", message) self.numWarnings += 1 def error(self, message): @@ -250,3 +260,25 @@ def error(self, message): def fatal(self, message): self._write_message("fatal", message) sys.exit(2) + + +@enum.unique +class DiagnosticLevel(enum.IntEnum): + FATAL = 0 + ERROR = 1 + WARNING = 2 + NOTE = 3 + + @classmethod + def create(cls, value): + if value == "fatal": + return cls.FATAL + if value == "error": + return cls.ERROR + if value == "warning": + return cls.WARNING + if value == "note": + return cls.NOTE + raise ValueError( + 
f"invalid diagnostic level {repr(value)} of type {type(value)}" + ) diff --git a/llvm/utils/lit/lit/LitTestCase.py b/llvm/utils/lit/lit/LitTestCase.py index 566d068ad11ea..690b7cb6f13d5 100644 --- a/llvm/utils/lit/lit/LitTestCase.py +++ b/llvm/utils/lit/lit/LitTestCase.py @@ -46,7 +46,7 @@ def load_test_suite(inputs): lit_config = lit.LitConfig.LitConfig( progname="lit", path=[], - quiet=False, + diagnostic_level="note", useValgrind=False, valgrindLeakCheck=False, valgrindArgs=[], diff --git a/llvm/utils/lit/lit/TestingConfig.py b/llvm/utils/lit/lit/TestingConfig.py index c250838250547..590fede0a2373 100644 --- a/llvm/utils/lit/lit/TestingConfig.py +++ b/llvm/utils/lit/lit/TestingConfig.py @@ -144,7 +144,7 @@ def load_from_path(self, path, litConfig): try: exec(compile(data, path, "exec"), cfg_globals, None) if litConfig.debug: - litConfig.note("... loaded config %r" % path) + litConfig.dbg("... loaded config %r" % path) except SystemExit: e = sys.exc_info()[1] # We allow normal system exit inside a config file to just diff --git a/llvm/utils/lit/lit/cl_arguments.py b/llvm/utils/lit/lit/cl_arguments.py index 8238bc42395af..c4d90e795d858 100644 --- a/llvm/utils/lit/lit/cl_arguments.py +++ b/llvm/utils/lit/lit/cl_arguments.py @@ -15,6 +15,59 @@ class TestOrder(enum.Enum): SMART = "smart" +@enum.unique +class TestOutputLevel(enum.IntEnum): + OFF = 0 + FAILED = 1 + ALL = 2 + + @classmethod + def create(cls, value): + if value == "off": + return cls.OFF + if value == "failed": + return cls.FAILED + if value == "all": + return cls.ALL + raise ValueError(f"invalid output level {repr(value)} of type {type(value)}") + + +class TestOutputAction(argparse.Action): + def __init__(self, option_strings, dest, **kwargs): + super().__init__(option_strings, dest, nargs=None, **kwargs) + + def __call__(self, parser, namespace, value, option_string=None): + TestOutputAction.setOutputLevel(namespace, self.dest, value) + + @classmethod + def setOutputLevel(cls, namespace, dest, value): 
+ setattr(namespace, dest, value) + if dest == "test_output" and TestOutputLevel.create( + namespace.print_result_after + ) < TestOutputLevel.create(value): + setattr(namespace, "print_result_after", value) + elif dest == "print_result_after" and TestOutputLevel.create( + namespace.test_output + ) > TestOutputLevel.create(value): + setattr(namespace, "test_output", value) + + +class AliasAction(argparse.Action): + def __init__(self, option_strings, dest, nargs=None, **kwargs): + self.expansion = kwargs.pop("alias", None) + if not self.expansion: + raise ValueError("no aliases expansion provided") + super().__init__(option_strings, dest, nargs=0, **kwargs) + + def __call__(self, parser, namespace, value, option_string=None): + for e in self.expansion: + if callable(e): + e(namespace) + else: + dest, val = e + setattr(namespace, dest, val) + + def parse_args(): parser = argparse.ArgumentParser(prog="lit", fromfile_prefix_chars="@") parser.add_argument( @@ -55,41 +108,101 @@ def parse_args(): ) format_group = parser.add_argument_group("Output Format") - # FIXME: I find these names very confusing, although I like the - # functionality. 
format_group.add_argument( - "-q", "--quiet", help="Suppress no error output", action="store_true" + "--test-output", + help="Control whether the executed commands and their outputs are printed after each test has executed (default off)", + choices=["off", "failed", "all"], + default="off", + action=TestOutputAction, + ) + format_group.add_argument( + "--print-result-after", + help="Control whether the executed test names and results are printed after each test has executed (default all)", + choices=["off", "failed", "all"], + default="all", + action=TestOutputAction, + ) + format_group.add_argument( + "--diagnostic-level", + help="Control how verbose lit diagnostics should be (default note)", + choices=["error", "warning", "note"], + default="note", + ) + format_group.add_argument( + "--terse-summary", + help="Don't show the elapsed time after all tests have finished, and only show the number of failed tests (default off).", + action="store_true", + dest="terse_summary", + ) + format_group.add_argument( + "--no-terse-summary", + help="Print the elapsed time and the number of passed tests after all tests have finished (default on).", + action="store_false", + dest="terse_summary", + ) + parser.set_defaults(terse_summary=False) + format_group.add_argument( + "-q", + "--quiet", + help="Alias for '--diagnostic-level=error --test-output=off --print-result-after=failed --terse-summary'", + action=AliasAction, + alias=[ + lambda namespace: TestOutputAction.setOutputLevel( + namespace, "print_result_after", "failed" + ), + lambda namespace: TestOutputAction.setOutputLevel( + namespace, "test_output", "off" + ), + ("diagnostic_level", "error"), + ("terse_summary", True), + ], ) format_group.add_argument( "-s", "--succinct", - help="Reduce amount of output."
- " Additionally, show a progress bar," - " unless --no-progress-bar is specified.", - action="store_true", + help="Alias for '--progress-bar --print-result-after=failed'", + action=AliasAction, + alias=[ + ("useProgressBar", True), + lambda namespace: TestOutputAction.setOutputLevel( + namespace, "print_result_after", "failed" + ), + ], ) format_group.add_argument( "-v", "--verbose", - dest="showOutput", help="For failed tests, show all output. For example, each command is" " printed before it is executed, so the last printed command is the one" - " that failed.", - action="store_true", + " that failed. Alias for '--test-output=failed'", + action=AliasAction, + alias=[ + lambda namespace: TestOutputAction.setOutputLevel( + namespace, "test_output", "failed" + ), + ], ) format_group.add_argument( "-vv", "--echo-all-commands", - dest="showOutput", help="Deprecated alias for -v.", - action="store_true", + action=AliasAction, + alias=[ + lambda namespace: TestOutputAction.setOutputLevel( + namespace, "test_output", "failed" + ), + ], ) format_group.add_argument( "-a", "--show-all", - dest="showAllOutput", - help="Enable -v, but for all tests not just failed tests.", - action="store_true", + help="Enable -v, but for all tests not just failed tests. 
Alias for '--test-output=all'", + action=AliasAction, + alias=[ + lambda namespace: TestOutputAction.setOutputLevel( + namespace, "test_output", "all" + ), + ], ) format_group.add_argument( "-r", @@ -105,10 +218,16 @@ def parse_args(): help="Write test results to the provided path", metavar="PATH", ) + format_group.add_argument( + "--progress-bar", + dest="useProgressBar", + help="Show curses based progress bar", + action="store_true", + ) format_group.add_argument( "--no-progress-bar", dest="useProgressBar", - help="Do not use curses based progress bar", + help="Do not use curses based progress bar (default)", action="store_false", ) diff --git a/llvm/utils/lit/lit/discovery.py b/llvm/utils/lit/lit/discovery.py index 2e7f90c6bb0c9..879123296cdce 100644 --- a/llvm/utils/lit/lit/discovery.py +++ b/llvm/utils/lit/lit/discovery.py @@ -63,7 +63,7 @@ def search1(path): # We found a test suite, create a new config for it and load it. if litConfig.debug: - litConfig.note("loading suite config %r" % cfgpath) + litConfig.dbg("loading suite config %r" % cfgpath) cfg = TestingConfig.fromdefaults(litConfig) cfg.load_from_path(cfgpath, litConfig) @@ -116,7 +116,7 @@ def search1(path_in_suite): # file into it. 
config = copy.deepcopy(parent) if litConfig.debug: - litConfig.note("loading local config %r" % cfgpath) + litConfig.dbg("loading local config %r" % cfgpath) config.load_from_path(cfgpath, litConfig) return config @@ -138,7 +138,7 @@ def getTests(path, litConfig, testSuiteCache, localConfigCache): return (), () if litConfig.debug: - litConfig.note("resolved input %r to %r::%r" % (path, ts.name, path_in_suite)) + litConfig.dbg("resolved input %r to %r::%r" % (path, ts.name, path_in_suite)) return ts, getTestsInSuite( ts, diff --git a/llvm/utils/lit/lit/display.py b/llvm/utils/lit/lit/display.py index b565bbc7a4f93..4dc04d93d3ea7 100644 --- a/llvm/utils/lit/lit/display.py +++ b/llvm/utils/lit/lit/display.py @@ -2,7 +2,7 @@ def create_display(opts, tests, total_tests, workers): - if opts.quiet: + if opts.print_result_after == "off" and not opts.useProgressBar: return NopDisplay() num_tests = len(tests) @@ -10,7 +10,7 @@ def create_display(opts, tests, total_tests, workers): header = "-- Testing: %d%s tests, %d workers --" % (num_tests, of_total, workers) progress_bar = None - if opts.succinct and opts.useProgressBar: + if opts.useProgressBar: import lit.ProgressBar try: @@ -96,8 +96,8 @@ def update(self, test): show_result = ( test.isFailure() - or self.opts.showAllOutput - or (not self.opts.quiet and not self.opts.succinct) + and self.opts.print_result_after == "failed" + or self.opts.print_result_after == "all" ) if show_result: if self.progress_bar: @@ -134,7 +134,9 @@ def print_result(self, test): ) # Show the test failure output, if requested. 
- if (test.isFailure() and self.opts.showOutput) or self.opts.showAllOutput: + if ( + test.isFailure() and self.opts.test_output == "failed" + ) or self.opts.test_output == "all": if test.isFailure(): print("%s TEST '%s' FAILED %s" % ("*" * 20, test_name, "*" * 20)) out = test.result.output diff --git a/llvm/utils/lit/lit/llvm/config.py b/llvm/utils/lit/lit/llvm/config.py index 913ba69d63328..59982c94b787c 100644 --- a/llvm/utils/lit/lit/llvm/config.py +++ b/llvm/utils/lit/lit/llvm/config.py @@ -53,7 +53,10 @@ def __init__(self, lit_config, config): self.use_lit_shell = True global lit_path_displayed - if not self.lit_config.quiet and lit_path_displayed is False: + if ( + self.lit_config.diagnostic_level_enabled("note") + and lit_path_displayed is False + ): self.lit_config.note("using lit tools: {}".format(path)) lit_path_displayed = True @@ -527,7 +530,7 @@ def use_llvm_tool( if tool: tool = os.path.normpath(tool) - if not self.lit_config.quiet and not quiet: + if not quiet: self.lit_config.note("using {}: {}".format(name, tool)) return tool @@ -637,10 +640,9 @@ def clang_setup( ("%ms_abi_triple", self.make_msabi_triple(self.config.target_triple)) ) else: - if not self.lit_config.quiet: - self.lit_config.note( - "No default target triple was found, some tests may fail as a result." - ) + self.lit_config.note( + "No default target triple was found, some tests may fail as a result." 
+ ) self.config.substitutions.append(("%itanium_abi_triple", "")) self.config.substitutions.append(("%ms_abi_triple", "")) diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py index a585cc0abdd48..07e809b168dc2 100755 --- a/llvm/utils/lit/lit/main.py +++ b/llvm/utils/lit/lit/main.py @@ -30,7 +30,7 @@ def main(builtin_params={}): lit_config = lit.LitConfig.LitConfig( progname=os.path.basename(sys.argv[0]), path=opts.path, - quiet=opts.quiet, + diagnostic_level=opts.diagnostic_level, useValgrind=opts.useValgrind, valgrindLeakCheck=opts.valgrindLeakCheck, valgrindArgs=opts.valgrindArgs, @@ -332,7 +332,7 @@ def print_results(tests, elapsed, opts): opts.printPathRelativeCWD, ) - print_summary(total_tests, tests_by_code, opts.quiet, elapsed) + print_summary(total_tests, tests_by_code, opts.terse_summary, elapsed) def print_group(tests, code, shown_codes, printPathRelativeCWD): diff --git a/llvm/utils/lit/tests/Inputs/verbosity/fail.txt b/llvm/utils/lit/tests/Inputs/verbosity/fail.txt new file mode 100644 index 0000000000000..2bcca02683614 --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/verbosity/fail.txt @@ -0,0 +1,2 @@ +RUN: echo "fail test output" +RUN: fail \ No newline at end of file diff --git a/llvm/utils/lit/tests/Inputs/verbosity/lit.cfg b/llvm/utils/lit/tests/Inputs/verbosity/lit.cfg new file mode 100644 index 0000000000000..c328b5b66c481 --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/verbosity/lit.cfg @@ -0,0 +1,12 @@ +import lit.formats + +config.name = "verbosity" +config.suffixes = [".txt"] +config.test_format = lit.formats.ShTest() +config.test_source_root = None +config.test_exec_root = None + +lit_config.dbg("this is a debug log") +lit_config.note("this is a note") +lit_config.warning("this is a warning") +#lit_config.error("this is an error") diff --git a/llvm/utils/lit/tests/Inputs/verbosity/pass.txt b/llvm/utils/lit/tests/Inputs/verbosity/pass.txt new file mode 100644 index 0000000000000..f64843827e147 --- /dev/null +++ 
b/llvm/utils/lit/tests/Inputs/verbosity/pass.txt @@ -0,0 +1 @@ +RUN: echo "pass test output" \ No newline at end of file diff --git a/llvm/utils/lit/tests/Inputs/verbosity/unsupported.txt b/llvm/utils/lit/tests/Inputs/verbosity/unsupported.txt new file mode 100644 index 0000000000000..f5ebd4da178f8 --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/verbosity/unsupported.txt @@ -0,0 +1,2 @@ +REQUIRES: asdf +RUN: not echo "unsupported test output" diff --git a/llvm/utils/lit/tests/Inputs/verbosity/xfail.txt b/llvm/utils/lit/tests/Inputs/verbosity/xfail.txt new file mode 100644 index 0000000000000..85001cc22b08e --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/verbosity/xfail.txt @@ -0,0 +1,2 @@ +XFAIL: * +RUN: not echo "xfail test output" \ No newline at end of file diff --git a/llvm/utils/lit/tests/Inputs/verbosity/xpass.txt b/llvm/utils/lit/tests/Inputs/verbosity/xpass.txt new file mode 100644 index 0000000000000..87c95ec75ecdc --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/verbosity/xpass.txt @@ -0,0 +1,2 @@ +XFAIL: * +RUN: echo "xpass test output" diff --git a/llvm/utils/lit/tests/lit-opts.py b/llvm/utils/lit/tests/lit-opts.py index a533a59d9d124..0759c1d17be58 100644 --- a/llvm/utils/lit/tests/lit-opts.py +++ b/llvm/utils/lit/tests/lit-opts.py @@ -12,13 +12,13 @@ # Check that LIT_OPTS understands multiple options with arbitrary spacing. # -# RUN: env LIT_OPTS='-a -v -Dvar=foobar' \ +# RUN: env LIT_OPTS='-v -a -Dvar=foobar' \ # RUN: %{lit} -s %{inputs}/lit-opts \ # RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR=foobar %s # Check that LIT_OPTS parses shell-like quotes and escapes. 
# -# RUN: env LIT_OPTS='-a -v -Dvar="foo bar"\ baz' \ +# RUN: env LIT_OPTS='-v -a -Dvar="foo bar"\ baz' \ # RUN: %{lit} -s %{inputs}/lit-opts \ # RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR="foo bar baz" %s diff --git a/llvm/utils/lit/tests/per-test-coverage-by-lit-cfg.py b/llvm/utils/lit/tests/per-test-coverage-by-lit-cfg.py index 189c1cebd623b..b3af606c52f18 100644 --- a/llvm/utils/lit/tests/per-test-coverage-by-lit-cfg.py +++ b/llvm/utils/lit/tests/per-test-coverage-by-lit-cfg.py @@ -1,10 +1,10 @@ # Test if lit_config.per_test_coverage in lit.cfg sets individual test case coverage. -# RUN: %{lit} -a -vv -Dexecute_external=False \ +# RUN: %{lit} -a -Dexecute_external=False \ # RUN: %{inputs}/per-test-coverage-by-lit-cfg/per-test-coverage-by-lit-cfg.py | \ # RUN: FileCheck -DOUT=stdout %s -# RUN: %{lit} -a -vv -Dexecute_external=True \ +# RUN: %{lit} -a -Dexecute_external=True \ # RUN: %{inputs}/per-test-coverage-by-lit-cfg/per-test-coverage-by-lit-cfg.py | \ # RUN: FileCheck -DOUT=stderr %s diff --git a/llvm/utils/lit/tests/per-test-coverage.py b/llvm/utils/lit/tests/per-test-coverage.py index cf5e82c44dc51..ba513554ae76e 100644 --- a/llvm/utils/lit/tests/per-test-coverage.py +++ b/llvm/utils/lit/tests/per-test-coverage.py @@ -1,10 +1,10 @@ # Test LLVM_PROFILE_FILE is set when --per-test-coverage is passed to command line. 
-# RUN: %{lit} -a -vv --per-test-coverage -Dexecute_external=False \ +# RUN: %{lit} -a --per-test-coverage -Dexecute_external=False \ # RUN: %{inputs}/per-test-coverage/per-test-coverage.py | \ # RUN: FileCheck -DOUT=stdout %s -# RUN: %{lit} -a -vv --per-test-coverage -Dexecute_external=True \ +# RUN: %{lit} -a --per-test-coverage -Dexecute_external=True \ # RUN: %{inputs}/per-test-coverage/per-test-coverage.py | \ # RUN: FileCheck -DOUT=stderr %s diff --git a/llvm/utils/lit/tests/shtest-cat.py b/llvm/utils/lit/tests/shtest-cat.py index 5efe25c41684a..9763f9fbf1a9d 100644 --- a/llvm/utils/lit/tests/shtest-cat.py +++ b/llvm/utils/lit/tests/shtest-cat.py @@ -1,6 +1,6 @@ ## Test the cat command. # -# RUN: not %{lit} -a -v %{inputs}/shtest-cat \ +# RUN: not %{lit} -v %{inputs}/shtest-cat \ # RUN: | FileCheck -match-full-lines %s # END. diff --git a/llvm/utils/lit/tests/shtest-env-negative.py b/llvm/utils/lit/tests/shtest-env-negative.py index c8b59b224e7c4..236c6a19e694b 100644 --- a/llvm/utils/lit/tests/shtest-env-negative.py +++ b/llvm/utils/lit/tests/shtest-env-negative.py @@ -1,6 +1,6 @@ ## Test the env command (failing tests). -# RUN: not %{lit} -a -v %{inputs}/shtest-env-negative \ +# RUN: not %{lit} -v %{inputs}/shtest-env-negative \ # RUN: | FileCheck -match-full-lines %s # # END. diff --git a/llvm/utils/lit/tests/shtest-env-path.py b/llvm/utils/lit/tests/shtest-env-path.py index bf459ae53fbc0..7f04756ed6ad5 100644 --- a/llvm/utils/lit/tests/shtest-env-path.py +++ b/llvm/utils/lit/tests/shtest-env-path.py @@ -1,9 +1,9 @@ ## Tests env command for setting the PATH variable. # The test is using /bin/sh. Limit to system known to have /bin/sh. -# REQUIRES: system-linux +# REQUIRES: system-linux || system-darwin -# RUN: %{lit} -a -v %{inputs}/shtest-env-path/path.txt \ +# RUN: %{lit} -a %{inputs}/shtest-env-path/path.txt \ # RUN: | FileCheck -match-full-lines %s # # END. 
diff --git a/llvm/utils/lit/tests/shtest-env-positive.py b/llvm/utils/lit/tests/shtest-env-positive.py index 4f07b69ecc7d3..089acd308c5c5 100644 --- a/llvm/utils/lit/tests/shtest-env-positive.py +++ b/llvm/utils/lit/tests/shtest-env-positive.py @@ -1,6 +1,6 @@ ## Test the env command (passing tests). -# RUN: %{lit} -a -v %{inputs}/shtest-env-positive \ +# RUN: %{lit} -a %{inputs}/shtest-env-positive \ # RUN: | FileCheck -match-full-lines %s # # END. diff --git a/llvm/utils/lit/tests/shtest-export.py b/llvm/utils/lit/tests/shtest-export.py index f2de8e8cd8b5f..d45a94a5eb830 100644 --- a/llvm/utils/lit/tests/shtest-export.py +++ b/llvm/utils/lit/tests/shtest-export.py @@ -1,6 +1,6 @@ ## Test the export command. -# RUN: not %{lit} -a -v %{inputs}/shtest-export \ +# RUN: not %{lit} -v %{inputs}/shtest-export \ # RUN: | FileCheck -match-full-lines %s # # END. diff --git a/llvm/utils/lit/tests/shtest-glob.py b/llvm/utils/lit/tests/shtest-glob.py index aa4705b634a7d..ba609e036c166 100644 --- a/llvm/utils/lit/tests/shtest-glob.py +++ b/llvm/utils/lit/tests/shtest-glob.py @@ -1,6 +1,6 @@ ## Tests glob pattern handling in echo command. -# RUN: not %{lit} -a -v %{inputs}/shtest-glob \ +# RUN: not %{lit} -v %{inputs}/shtest-glob \ # RUN: | FileCheck -dump-input=fail -match-full-lines --implicit-check-not=Error: %s # END. diff --git a/llvm/utils/lit/tests/shtest-not.py b/llvm/utils/lit/tests/shtest-not.py index b42769ffd9383..e735d38260b37 100644 --- a/llvm/utils/lit/tests/shtest-not.py +++ b/llvm/utils/lit/tests/shtest-not.py @@ -1,6 +1,6 @@ # Check the not command -# RUN: not %{lit} -a -v %{inputs}/shtest-not \ +# RUN: not %{lit} -a %{inputs}/shtest-not \ # RUN: | FileCheck -match-full-lines %s # # END. 
diff --git a/llvm/utils/lit/tests/shtest-pushd-popd.py b/llvm/utils/lit/tests/shtest-pushd-popd.py index f917c1a4a4599..799e9d6d65951 100644 --- a/llvm/utils/lit/tests/shtest-pushd-popd.py +++ b/llvm/utils/lit/tests/shtest-pushd-popd.py @@ -1,6 +1,6 @@ # Check the pushd and popd commands -# RUN: not %{lit} -a -v %{inputs}/shtest-pushd-popd \ +# RUN: not %{lit} -v %{inputs}/shtest-pushd-popd \ # RUN: | FileCheck -match-full-lines %s # # END. diff --git a/llvm/utils/lit/tests/shtest-readfile-external.py b/llvm/utils/lit/tests/shtest-readfile-external.py index 6fe1088efd674..0d8e3ad1242bf 100644 --- a/llvm/utils/lit/tests/shtest-readfile-external.py +++ b/llvm/utils/lit/tests/shtest-readfile-external.py @@ -4,7 +4,7 @@ # ALLOW_RETRIES: 2 # UNSUPPORTED: system-windows -# RUN: env LIT_USE_INTERNAL_SHELL=0 not %{lit} -a -v %{inputs}/shtest-readfile | FileCheck -match-full-lines -DTEMP_PATH=%S/Inputs/shtest-readfile/Output %s +# RUN: env LIT_USE_INTERNAL_SHELL=0 not %{lit} -v %{inputs}/shtest-readfile | FileCheck -match-full-lines -DTEMP_PATH=%S/Inputs/shtest-readfile/Output %s # CHECK: -- Testing: 5 tests{{.*}} diff --git a/llvm/utils/lit/tests/shtest-readfile.py b/llvm/utils/lit/tests/shtest-readfile.py index 218da2257bcff..ca57db82e6617 100644 --- a/llvm/utils/lit/tests/shtest-readfile.py +++ b/llvm/utils/lit/tests/shtest-readfile.py @@ -3,7 +3,7 @@ # TODO(boomanaiden154): This sometimes fails, possibly due to buffers not being flushed. 
# ALLOW_RETRIES: 2 -# RUN: env LIT_USE_INTERNAL_SHELL=1 not %{lit} -a -v %{inputs}/shtest-readfile | FileCheck -match-full-lines -DTEMP_PATH=%S%{fs-sep}Inputs%{fs-sep}shtest-readfile%{fs-sep}Output %s +# RUN: env LIT_USE_INTERNAL_SHELL=1 not %{lit} -v %{inputs}/shtest-readfile | FileCheck -match-full-lines -DTEMP_PATH=%S%{fs-sep}Inputs%{fs-sep}shtest-readfile%{fs-sep}Output %s # CHECK: -- Testing: 5 tests{{.*}} diff --git a/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py b/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py index d81cde0159792..d5340a7d2efb9 100644 --- a/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py +++ b/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py @@ -4,7 +4,7 @@ # These tests are specific to options that Darwin does not support. # UNSUPPORTED: system-windows, system-cygwin, system-darwin, system-aix, system-solaris -# RUN: not %{lit} -a -v %{inputs}/shtest-ulimit-nondarwin | FileCheck %s +# RUN: not %{lit} -v %{inputs}/shtest-ulimit-nondarwin | FileCheck %s # CHECK: -- Testing: 2 tests{{.*}} diff --git a/llvm/utils/lit/tests/shtest-ulimit.py b/llvm/utils/lit/tests/shtest-ulimit.py index 21e5a5e2491d1..582477bef65fc 100644 --- a/llvm/utils/lit/tests/shtest-ulimit.py +++ b/llvm/utils/lit/tests/shtest-ulimit.py @@ -8,7 +8,7 @@ # RUN: %{python} %S/Inputs/shtest-ulimit/print_limits.py | grep RLIMIT_NOFILE \ # RUN: | sed -n -e 's/.*=//p' | tr -d '\n' > %t.nofile_limit -# RUN: not %{lit} -a -v %{inputs}/shtest-ulimit --order=lexical \ +# RUN: not %{lit} -v %{inputs}/shtest-ulimit --order=lexical \ # RUN: | FileCheck -DBASE_NOFILE_LIMIT=%{readfile:%t.nofile_limit} %s # CHECK: -- Testing: 3 tests{{.*}} diff --git a/llvm/utils/lit/tests/shtest-umask.py b/llvm/utils/lit/tests/shtest-umask.py index e67f0308db661..8af81ec3b4ebd 100644 --- a/llvm/utils/lit/tests/shtest-umask.py +++ b/llvm/utils/lit/tests/shtest-umask.py @@ -1,6 +1,6 @@ # Check the umask command -# RUN: not %{lit} -a -v %{inputs}/shtest-umask | FileCheck -match-full-lines %s +# RUN: not 
%{lit} -v %{inputs}/shtest-umask | FileCheck -match-full-lines %s # TODO(boomanaiden154): We should be asserting that we get expected behavior # on Windows rather than just listing this as unsupported. # UNSUPPORTED: system-windows diff --git a/llvm/utils/lit/tests/unit/TestRunner.py b/llvm/utils/lit/tests/unit/TestRunner.py index 09470c7b9386e..a3fa62e1ef0e1 100644 --- a/llvm/utils/lit/tests/unit/TestRunner.py +++ b/llvm/utils/lit/tests/unit/TestRunner.py @@ -30,7 +30,7 @@ def load_keyword_parser_lit_tests(): lit_config = lit.LitConfig.LitConfig( progname="lit", path=[], - quiet=False, + diagnostic_level="note", useValgrind=False, valgrindLeakCheck=False, valgrindArgs=[], diff --git a/llvm/utils/lit/tests/verbosity.py b/llvm/utils/lit/tests/verbosity.py new file mode 100644 index 0000000000000..9b1690695d392 --- /dev/null +++ b/llvm/utils/lit/tests/verbosity.py @@ -0,0 +1,1130 @@ +# Test various combinations of options controlling lit stdout and stderr output + +# RUN: mkdir -p %t + +### Test default + +# RUN: not %{lit} %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# NO-ARGS: -- Testing: 5 tests, 1 workers -- +# NO-ARGS-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# NO-ARGS-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# NO-ARGS-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# NO-ARGS-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# NO-ARGS-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# NO-ARGS-NEXT: ******************** +# NO-ARGS-NEXT: Failed Tests (1): +# NO-ARGS-NEXT: verbosity :: fail.txt +# NO-ARGS-EMPTY: +# NO-ARGS-NEXT: ******************** +# NO-ARGS-NEXT: Unexpectedly Passed Tests (1): +# NO-ARGS-NEXT: verbosity :: xpass.txt +# NO-ARGS-EMPTY: +# NO-ARGS-EMPTY: +# NO-ARGS-NEXT: Testing Time: {{.*}}s +# NO-ARGS-EMPTY: +# NO-ARGS-NEXT: Total Discovered Tests: 5 +# NO-ARGS-NEXT: 
Unsupported : 1 (20.00%) +# NO-ARGS-NEXT: Passed : 1 (20.00%) +# NO-ARGS-NEXT: Expectedly Failed : 1 (20.00%) +# NO-ARGS-NEXT: Failed : 1 (20.00%) +# NO-ARGS-NEXT: Unexpectedly Passed: 1 (20.00%) + +# NO-ARGS-ERR: lit.py: {{.*}}lit.cfg:{{[0-9]+}}: note: this is a note +# NO-ARGS-ERR-NEXT: lit.py: {{.*}}lit.cfg:{{[0-9]+}}: warning: this is a warning +# NO-ARGS-ERR-EMPTY: +# NO-ARGS-ERR-NEXT: 1 warning(s) in tests + + +### Test aliases + +# RUN: not %{lit} --succinct %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix SUCCINCT < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# SUCCINCT: -- Testing: 5 tests, 1 workers -- +# SUCCINCT-NEXT: Testing: +# SUCCINCT-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# SUCCINCT-NEXT: Testing: 0.. 10.. +# SUCCINCT-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# SUCCINCT-NEXT: Testing: 0.. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90.. +# SUCCINCT-NEXT: ******************** +# SUCCINCT-NEXT: Failed Tests (1): +# SUCCINCT-NEXT: verbosity :: fail.txt +# SUCCINCT-EMPTY: +# SUCCINCT-NEXT: ******************** +# SUCCINCT-NEXT: Unexpectedly Passed Tests (1): +# SUCCINCT-NEXT: verbosity :: xpass.txt +# SUCCINCT-EMPTY: +# SUCCINCT-EMPTY: +# SUCCINCT-NEXT: Testing Time: {{.*}}s +# SUCCINCT-EMPTY: +# SUCCINCT-NEXT: Total Discovered Tests: 5 +# SUCCINCT-NEXT: Unsupported : 1 (20.00%) +# SUCCINCT-NEXT: Passed : 1 (20.00%) +# SUCCINCT-NEXT: Expectedly Failed : 1 (20.00%) +# SUCCINCT-NEXT: Failed : 1 (20.00%) +# SUCCINCT-NEXT: Unexpectedly Passed: 1 (20.00%) + +# RUN: not %{lit} --verbose %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix VERBOSE < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# VERBOSE: -- Testing: 5 tests, 1 workers -- +# VERBOSE-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# VERBOSE-NEXT: ******************** TEST 'verbosity :: fail.txt' 
FAILED ******************** +# VERBOSE-NEXT: Exit Code: 127 +# VERBOSE-EMPTY: +# VERBOSE-NEXT: Command Output (stdout): +# VERBOSE-NEXT: -- +# VERBOSE-NEXT: # {{R}}UN: at line 1 +# VERBOSE-NEXT: echo "fail test output" +# VERBOSE-NEXT: # executed command: echo 'fail test output' +# VERBOSE-NEXT: # .---command stdout------------ +# VERBOSE-NEXT: # | fail test output +# VERBOSE-NEXT: # `----------------------------- +# VERBOSE-NEXT: # {{R}}UN: at line 2 +# VERBOSE-NEXT: fail +# VERBOSE-NEXT: # executed command: fail +# VERBOSE-NEXT: # .---command stderr------------ +# VERBOSE-NEXT: # | 'fail': command not found +# VERBOSE-NEXT: # `----------------------------- +# VERBOSE-NEXT: # error: command failed with exit status: 127 +# VERBOSE-EMPTY: +# VERBOSE-NEXT: -- +# VERBOSE-EMPTY: +# VERBOSE-NEXT: ******************** +# VERBOSE-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# VERBOSE-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# VERBOSE-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# VERBOSE-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# VERBOSE-NEXT: ******************** TEST 'verbosity :: xpass.txt' FAILED ******************** +# VERBOSE-NEXT: Exit Code: 0 +# VERBOSE-EMPTY: +# VERBOSE-NEXT: Command Output (stdout): +# VERBOSE-NEXT: -- +# VERBOSE-NEXT: # {{R}}UN: at line 2 +# VERBOSE-NEXT: echo "xpass test output" +# VERBOSE-NEXT: # executed command: echo 'xpass test output' +# VERBOSE-NEXT: # .---command stdout------------ +# VERBOSE-NEXT: # | xpass test output +# VERBOSE-NEXT: # `----------------------------- +# VERBOSE-EMPTY: +# VERBOSE-NEXT: -- +# VERBOSE-EMPTY: +# VERBOSE-NEXT: ******************** +# VERBOSE-NEXT: ******************** +# VERBOSE-NEXT: Failed Tests (1): +# VERBOSE-NEXT: verbosity :: fail.txt +# VERBOSE-EMPTY: +# VERBOSE-NEXT: ******************** +# VERBOSE-NEXT: Unexpectedly Passed Tests (1): +# VERBOSE-NEXT: verbosity :: xpass.txt +# VERBOSE-EMPTY: +# VERBOSE-EMPTY: +# VERBOSE-NEXT: Testing Time: {{.*}}s +# VERBOSE-EMPTY: +# 
VERBOSE-NEXT: Total Discovered Tests: 5 +# VERBOSE-NEXT: Unsupported : 1 (20.00%) +# VERBOSE-NEXT: Passed : 1 (20.00%) +# VERBOSE-NEXT: Expectedly Failed : 1 (20.00%) +# VERBOSE-NEXT: Failed : 1 (20.00%) +# VERBOSE-NEXT: Unexpectedly Passed: 1 (20.00%) + +# RUN: not %{lit} --show-all %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix SHOW-ALL < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# SHOW-ALL: -- Testing: 5 tests, 1 workers -- +# SHOW-ALL-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# SHOW-ALL-NEXT: ******************** TEST 'verbosity :: fail.txt' FAILED ******************** +# SHOW-ALL-NEXT: Exit Code: 127 +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: Command Output (stdout): +# SHOW-ALL-NEXT: -- +# SHOW-ALL-NEXT: # {{R}}UN: at line 1 +# SHOW-ALL-NEXT: echo "fail test output" +# SHOW-ALL-NEXT: # executed command: echo 'fail test output' +# SHOW-ALL-NEXT: # .---command stdout------------ +# SHOW-ALL-NEXT: # | fail test output +# SHOW-ALL-NEXT: # `----------------------------- +# SHOW-ALL-NEXT: # {{R}}UN: at line 2 +# SHOW-ALL-NEXT: fail +# SHOW-ALL-NEXT: # executed command: fail +# SHOW-ALL-NEXT: # .---command stderr------------ +# SHOW-ALL-NEXT: # | 'fail': command not found +# SHOW-ALL-NEXT: # `----------------------------- +# SHOW-ALL-NEXT: # error: command failed with exit status: 127 +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: -- +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: ******************** +# SHOW-ALL-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# SHOW-ALL-NEXT: Exit Code: 0 +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: Command Output (stdout): +# SHOW-ALL-NEXT: -- +# SHOW-ALL-NEXT: # {{R}}UN: at line 1 +# SHOW-ALL-NEXT: echo "pass test output" +# SHOW-ALL-NEXT: # executed command: echo 'pass test output' +# SHOW-ALL-NEXT: # .---command stdout------------ +# SHOW-ALL-NEXT: # | pass test output +# SHOW-ALL-NEXT: # `----------------------------- +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: -- 
+# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: ******************** +# SHOW-ALL-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# SHOW-ALL-NEXT: Test requires the following unavailable features: asdf +# SHOW-ALL-NEXT: ******************** +# SHOW-ALL-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# SHOW-ALL-NEXT: Exit Code: 1 +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: Command Output (stdout): +# SHOW-ALL-NEXT: -- +# SHOW-ALL-NEXT: # {{R}}UN: at line 2 +# SHOW-ALL-NEXT: not echo "xfail test output" +# SHOW-ALL-NEXT: # executed command: not echo 'xfail test output' +# SHOW-ALL-NEXT: # .---command stdout------------ +# SHOW-ALL-NEXT: # | xfail test output +# SHOW-ALL-NEXT: # `----------------------------- +# SHOW-ALL-NEXT: # error: command failed with exit status: 1 +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: -- +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: ******************** +# SHOW-ALL-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# SHOW-ALL-NEXT: ******************** TEST 'verbosity :: xpass.txt' FAILED ******************** +# SHOW-ALL-NEXT: Exit Code: 0 +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: Command Output (stdout): +# SHOW-ALL-NEXT: -- +# SHOW-ALL-NEXT: # {{R}}UN: at line 2 +# SHOW-ALL-NEXT: echo "xpass test output" +# SHOW-ALL-NEXT: # executed command: echo 'xpass test output' +# SHOW-ALL-NEXT: # .---command stdout------------ +# SHOW-ALL-NEXT: # | xpass test output +# SHOW-ALL-NEXT: # `----------------------------- +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: -- +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: ******************** +# SHOW-ALL-NEXT: ******************** +# SHOW-ALL-NEXT: Failed Tests (1): +# SHOW-ALL-NEXT: verbosity :: fail.txt +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: ******************** +# SHOW-ALL-NEXT: Unexpectedly Passed Tests (1): +# SHOW-ALL-NEXT: verbosity :: xpass.txt +# SHOW-ALL-EMPTY: +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: Testing Time: {{.*}}s +# SHOW-ALL-EMPTY: +# SHOW-ALL-NEXT: Total Discovered Tests: 5 +# SHOW-ALL-NEXT: Unsupported : 1 (20.00%) +# SHOW-ALL-NEXT: Passed : 
1 (20.00%) +# SHOW-ALL-NEXT: Expectedly Failed : 1 (20.00%) +# SHOW-ALL-NEXT: Failed : 1 (20.00%) +# SHOW-ALL-NEXT: Unexpectedly Passed: 1 (20.00%) + +# RUN: not %{lit} --quiet %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET < %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET-ERR --implicit-check-not lit < %t/stderr.txt + +# QUIET: -- Testing: 5 tests, 1 workers -- +# QUIET-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# QUIET-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# QUIET-NEXT: ******************** +# QUIET-NEXT: Failed Tests (1): +# QUIET-NEXT: verbosity :: fail.txt +# QUIET-EMPTY: +# QUIET-NEXT: ******************** +# QUIET-NEXT: Unexpectedly Passed Tests (1): +# QUIET-NEXT: verbosity :: xpass.txt +# QUIET-EMPTY: +# QUIET-EMPTY: +# QUIET-NEXT: Total Discovered Tests: 5 +# QUIET-NEXT: Failed : 1 (20.00%) +# QUIET-NEXT: Unexpectedly Passed: 1 (20.00%) + +# QUIET-ERR: 1 warning(s) in tests + + +### Test log output + +# RUN: not %{lit} --debug %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix DEBUG < %t/stdout.txt +# RUN: FileCheck %s --check-prefix DEBUG-ERR --implicit-check-not lit < %t/stderr.txt + +# DEBUG: -- Testing: 5 tests, 1 workers -- +# DEBUG-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# DEBUG-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# DEBUG-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# DEBUG-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# DEBUG-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# DEBUG-NEXT: ******************** +# DEBUG-NEXT: Failed Tests (1): +# DEBUG-NEXT: verbosity :: fail.txt +# DEBUG-EMPTY: +# DEBUG-NEXT: ******************** +# DEBUG-NEXT: Unexpectedly Passed Tests (1): +# DEBUG-NEXT: verbosity :: xpass.txt +# DEBUG-EMPTY: +# DEBUG-EMPTY: +# DEBUG-NEXT: Testing Time: {{.*}}s +# DEBUG-EMPTY: +# DEBUG-NEXT: Total Discovered Tests: 5 +# DEBUG-NEXT: Unsupported : 1 (20.00%) +# DEBUG-NEXT: Passed : 1 (20.00%) +# 
DEBUG-NEXT: Expectedly Failed : 1 (20.00%) +# DEBUG-NEXT: Failed : 1 (20.00%) +# DEBUG-NEXT: Unexpectedly Passed: 1 (20.00%) + +# DEBUG-ERR: lit.py: {{.*}}discovery.py:{{[0-9]+}}: debug: loading suite config '{{.*}}lit.cfg' +# DEBUG-ERR-NEXT: lit.py: {{.*}}lit.cfg:{{[0-9]+}}: debug: this is a debug log +# DEBUG-ERR-NEXT: lit.py: {{.*}}lit.cfg:{{[0-9]+}}: note: this is a note +# DEBUG-ERR-NEXT: lit.py: {{.*}}lit.cfg:{{[0-9]+}}: warning: this is a warning +# DEBUG-ERR-NEXT: lit.py: {{.*}}TestingConfig.py:{{[0-9]+}}: debug: ... loaded config '{{.*}}lit.cfg' +# DEBUG-ERR-NEXT: lit.py: {{.*}}discovery.py:{{[0-9]+}}: debug: resolved input '{{.*}}verbosity' to 'verbosity'::() +# DEBUG-ERR-EMPTY: +# DEBUG-ERR-NEXT: 1 warning(s) in tests + + +# RUN: not %{lit} --diagnostic-level note %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# RUN: not %{lit} --diagnostic-level warning %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix WARNING-ERR --implicit-check-not lit < %t/stderr.txt + +# WARNING-ERR: lit.py: {{.*}}lit.cfg:{{[0-9]+}}: warning: this is a warning +# WARNING-ERR-EMPTY: +# WARNING-ERR-NEXT: 1 warning(s) in tests + +# RUN: not %{lit} --diagnostic-level error %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix ERROR-ERR --implicit-check-not lit < %t/stderr.txt + +# ERROR-ERR: 1 warning(s) in tests + + +### Test --test-output + +# RUN: not %{lit} --test-output off %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# RUN: not %{lit} --test-output failed %{inputs}/verbosity 2> 
%t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix VERBOSE < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# TEST-OUTPUT-OFF: -- Testing: 5 tests, 1 workers -- +# TEST-OUTPUT-OFF-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# TEST-OUTPUT-OFF-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# TEST-OUTPUT-OFF-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# TEST-OUTPUT-OFF-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# TEST-OUTPUT-OFF-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# TEST-OUTPUT-OFF-NEXT: ******************** +# TEST-OUTPUT-OFF-NEXT: Failed Tests (1): +# TEST-OUTPUT-OFF-NEXT: verbosity :: fail.txt +# TEST-OUTPUT-OFF-EMPTY: +# TEST-OUTPUT-OFF-NEXT: ******************** +# TEST-OUTPUT-OFF-NEXT: Unexpectedly Passed Tests (1): +# TEST-OUTPUT-OFF-NEXT: verbosity :: xpass.txt +# TEST-OUTPUT-OFF-EMPTY: +# TEST-OUTPUT-OFF-EMPTY: +# TEST-OUTPUT-OFF-NEXT: Testing Time: {{.*}}s +# TEST-OUTPUT-OFF-EMPTY: +# TEST-OUTPUT-OFF-NEXT: Total Discovered Tests: 5 +# TEST-OUTPUT-OFF-NEXT: Unsupported : 1 (20.00%) +# TEST-OUTPUT-OFF-NEXT: Passed : 1 (20.00%) +# TEST-OUTPUT-OFF-NEXT: Expectedly Failed : 1 (20.00%) +# TEST-OUTPUT-OFF-NEXT: Failed : 1 (20.00%) +# TEST-OUTPUT-OFF-NEXT: Unexpectedly Passed: 1 (20.00%) + +# RUN: not %{lit} --test-output all %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix SHOW-ALL < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + + +### Test --print-result-after + +# RUN: not %{lit} --print-result-after off %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix RESULT-OFF < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# RESULT-OFF: ******************** +# RESULT-OFF-NEXT: Failed Tests (1): +# RESULT-OFF-NEXT: verbosity :: fail.txt +# RESULT-OFF-EMPTY: +# RESULT-OFF-NEXT: 
******************** +# RESULT-OFF-NEXT: Unexpectedly Passed Tests (1): +# RESULT-OFF-NEXT: verbosity :: xpass.txt +# RESULT-OFF-EMPTY: +# RESULT-OFF-EMPTY: +# RESULT-OFF-NEXT: Testing Time: {{.*}}s +# RESULT-OFF-EMPTY: +# RESULT-OFF-NEXT: Total Discovered Tests: 5 +# RESULT-OFF-NEXT: Unsupported : 1 (20.00%) +# RESULT-OFF-NEXT: Passed : 1 (20.00%) +# RESULT-OFF-NEXT: Expectedly Failed : 1 (20.00%) +# RESULT-OFF-NEXT: Failed : 1 (20.00%) +# RESULT-OFF-NEXT: Unexpectedly Passed: 1 (20.00%) + + +# RUN: not %{lit} --print-result-after failed %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix RESULT-FAILED < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# RESULT-FAILED: -- Testing: 5 tests, 1 workers -- +# RESULT-FAILED-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# RESULT-FAILED-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# RESULT-FAILED-NEXT: ******************** +# RESULT-FAILED-NEXT: Failed Tests (1): +# RESULT-FAILED-NEXT: verbosity :: fail.txt +# RESULT-FAILED-EMPTY: +# RESULT-FAILED-NEXT: ******************** +# RESULT-FAILED-NEXT: Unexpectedly Passed Tests (1): +# RESULT-FAILED-NEXT: verbosity :: xpass.txt +# RESULT-FAILED-EMPTY: +# RESULT-FAILED-EMPTY: +# RESULT-FAILED-NEXT: Testing Time: {{.*}}s +# RESULT-FAILED-EMPTY: +# RESULT-FAILED-NEXT: Total Discovered Tests: 5 +# RESULT-FAILED-NEXT: Unsupported : 1 (20.00%) +# RESULT-FAILED-NEXT: Passed : 1 (20.00%) +# RESULT-FAILED-NEXT: Expectedly Failed : 1 (20.00%) +# RESULT-FAILED-NEXT: Failed : 1 (20.00%) +# RESULT-FAILED-NEXT: Unexpectedly Passed: 1 (20.00%) + + +# RUN: not %{lit} --print-result-after all %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + + +### Test combinations of --print-result-after followed by --test-output + +# RUN: not %{lit} 
--print-result-after off --test-output failed %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix RESULT-OFF-OUTPUT-FAILED < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# RESULT-OFF-OUTPUT-FAILED: -- Testing: 5 tests, 1 workers -- +# RESULT-OFF-OUTPUT-FAILED-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# RESULT-OFF-OUTPUT-FAILED-NEXT: ******************** TEST 'verbosity :: fail.txt' FAILED ******************** +# RESULT-OFF-OUTPUT-FAILED-NEXT: Exit Code: 127 +# RESULT-OFF-OUTPUT-FAILED-EMPTY: +# RESULT-OFF-OUTPUT-FAILED-NEXT: Command Output (stdout): +# RESULT-OFF-OUTPUT-FAILED-NEXT: -- +# RESULT-OFF-OUTPUT-FAILED-NEXT: # {{R}}UN: at line 1 +# RESULT-OFF-OUTPUT-FAILED-NEXT: echo "fail test output" +# RESULT-OFF-OUTPUT-FAILED-NEXT: # executed command: echo 'fail test output' +# RESULT-OFF-OUTPUT-FAILED-NEXT: # .---command stdout------------ +# RESULT-OFF-OUTPUT-FAILED-NEXT: # | fail test output +# RESULT-OFF-OUTPUT-FAILED-NEXT: # `----------------------------- +# RESULT-OFF-OUTPUT-FAILED-NEXT: # {{R}}UN: at line 2 +# RESULT-OFF-OUTPUT-FAILED-NEXT: fail +# RESULT-OFF-OUTPUT-FAILED-NEXT: # executed command: fail +# RESULT-OFF-OUTPUT-FAILED-NEXT: # .---command stderr------------ +# RESULT-OFF-OUTPUT-FAILED-NEXT: # | 'fail': command not found +# RESULT-OFF-OUTPUT-FAILED-NEXT: # `----------------------------- +# RESULT-OFF-OUTPUT-FAILED-NEXT: # error: command failed with exit status: 127 +# RESULT-OFF-OUTPUT-FAILED-EMPTY: +# RESULT-OFF-OUTPUT-FAILED-NEXT: -- +# RESULT-OFF-OUTPUT-FAILED-EMPTY: +# RESULT-OFF-OUTPUT-FAILED-NEXT: ******************** +# RESULT-OFF-OUTPUT-FAILED-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# RESULT-OFF-OUTPUT-FAILED-NEXT: ******************** TEST 'verbosity :: xpass.txt' FAILED ******************** +# RESULT-OFF-OUTPUT-FAILED-NEXT: Exit Code: 0 +# RESULT-OFF-OUTPUT-FAILED-EMPTY: +# RESULT-OFF-OUTPUT-FAILED-NEXT: Command Output (stdout): +# 
RESULT-OFF-OUTPUT-FAILED-NEXT: -- +# RESULT-OFF-OUTPUT-FAILED-NEXT: # {{R}}UN: at line 2 +# RESULT-OFF-OUTPUT-FAILED-NEXT: echo "xpass test output" +# RESULT-OFF-OUTPUT-FAILED-NEXT: # executed command: echo 'xpass test output' +# RESULT-OFF-OUTPUT-FAILED-NEXT: # .---command stdout------------ +# RESULT-OFF-OUTPUT-FAILED-NEXT: # | xpass test output +# RESULT-OFF-OUTPUT-FAILED-NEXT: # `----------------------------- +# RESULT-OFF-OUTPUT-FAILED-EMPTY: +# RESULT-OFF-OUTPUT-FAILED-NEXT: -- +# RESULT-OFF-OUTPUT-FAILED-EMPTY: +# RESULT-OFF-OUTPUT-FAILED-NEXT: ******************** +# RESULT-OFF-OUTPUT-FAILED-NEXT: ******************** +# RESULT-OFF-OUTPUT-FAILED-NEXT: Failed Tests (1): +# RESULT-OFF-OUTPUT-FAILED-NEXT: verbosity :: fail.txt +# RESULT-OFF-OUTPUT-FAILED-EMPTY: +# RESULT-OFF-OUTPUT-FAILED-NEXT: ******************** +# RESULT-OFF-OUTPUT-FAILED-NEXT: Unexpectedly Passed Tests (1): +# RESULT-OFF-OUTPUT-FAILED-NEXT: verbosity :: xpass.txt +# RESULT-OFF-OUTPUT-FAILED-EMPTY: +# RESULT-OFF-OUTPUT-FAILED-EMPTY: +# RESULT-OFF-OUTPUT-FAILED-NEXT: Testing Time: {{.*}}s +# RESULT-OFF-OUTPUT-FAILED-EMPTY: +# RESULT-OFF-OUTPUT-FAILED-NEXT: Total Discovered Tests: 5 +# RESULT-OFF-OUTPUT-FAILED-NEXT: Unsupported : 1 (20.00%) +# RESULT-OFF-OUTPUT-FAILED-NEXT: Passed : 1 (20.00%) +# RESULT-OFF-OUTPUT-FAILED-NEXT: Expectedly Failed : 1 (20.00%) +# RESULT-OFF-OUTPUT-FAILED-NEXT: Failed : 1 (20.00%) +# RESULT-OFF-OUTPUT-FAILED-NEXT: Unexpectedly Passed: 1 (20.00%) + +# RUN: not %{lit} --print-result-after all --test-output off %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# RUN: not %{lit} --print-result-after failed --test-output all %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix SHOW-ALL < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < 
%t/stderr.txt + + +### Test combinations of --test-output followed by --print-result-after + +# RUN: not %{lit} --test-output failed --print-result-after off %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix RESULT-OFF < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# RUN: not %{lit} --test-output off --print-result-after all %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# RUN: not %{lit} --test-output all --print-result-after failed %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix OUTPUT-ALL-RESULT-FAILED < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# OUTPUT-ALL-RESULT-FAILED: -- Testing: 5 tests, 1 workers -- +# OUTPUT-ALL-RESULT-FAILED-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# OUTPUT-ALL-RESULT-FAILED-NEXT: ******************** TEST 'verbosity :: fail.txt' FAILED ******************** +# OUTPUT-ALL-RESULT-FAILED-NEXT: Exit Code: 127 +# OUTPUT-ALL-RESULT-FAILED-EMPTY: +# OUTPUT-ALL-RESULT-FAILED-NEXT: Command Output (stdout): +# OUTPUT-ALL-RESULT-FAILED-NEXT: -- +# OUTPUT-ALL-RESULT-FAILED-NEXT: # {{R}}UN: at line 1 +# OUTPUT-ALL-RESULT-FAILED-NEXT: echo "fail test output" +# OUTPUT-ALL-RESULT-FAILED-NEXT: # executed command: echo 'fail test output' +# OUTPUT-ALL-RESULT-FAILED-NEXT: # .---command stdout------------ +# OUTPUT-ALL-RESULT-FAILED-NEXT: # | fail test output +# OUTPUT-ALL-RESULT-FAILED-NEXT: # `----------------------------- +# OUTPUT-ALL-RESULT-FAILED-NEXT: # {{R}}UN: at line 2 +# OUTPUT-ALL-RESULT-FAILED-NEXT: fail +# OUTPUT-ALL-RESULT-FAILED-NEXT: # executed command: fail +# OUTPUT-ALL-RESULT-FAILED-NEXT: # .---command stderr------------ +# OUTPUT-ALL-RESULT-FAILED-NEXT: # | 'fail': command 
not found +# OUTPUT-ALL-RESULT-FAILED-NEXT: # `----------------------------- +# OUTPUT-ALL-RESULT-FAILED-NEXT: # error: command failed with exit status: 127 +# OUTPUT-ALL-RESULT-FAILED-EMPTY: +# OUTPUT-ALL-RESULT-FAILED-NEXT: -- +# OUTPUT-ALL-RESULT-FAILED-EMPTY: +# OUTPUT-ALL-RESULT-FAILED-NEXT: ******************** +# OUTPUT-ALL-RESULT-FAILED-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# OUTPUT-ALL-RESULT-FAILED-NEXT: ******************** TEST 'verbosity :: xpass.txt' FAILED ******************** +# OUTPUT-ALL-RESULT-FAILED-NEXT: Exit Code: 0 +# OUTPUT-ALL-RESULT-FAILED-EMPTY: +# OUTPUT-ALL-RESULT-FAILED-NEXT: Command Output (stdout): +# OUTPUT-ALL-RESULT-FAILED-NEXT: -- +# OUTPUT-ALL-RESULT-FAILED-NEXT: # {{R}}UN: at line 2 +# OUTPUT-ALL-RESULT-FAILED-NEXT: echo "xpass test output" +# OUTPUT-ALL-RESULT-FAILED-NEXT: # executed command: echo 'xpass test output' +# OUTPUT-ALL-RESULT-FAILED-NEXT: # .---command stdout------------ +# OUTPUT-ALL-RESULT-FAILED-NEXT: # | xpass test output +# OUTPUT-ALL-RESULT-FAILED-NEXT: # `----------------------------- +# OUTPUT-ALL-RESULT-FAILED-EMPTY: +# OUTPUT-ALL-RESULT-FAILED-NEXT: -- +# OUTPUT-ALL-RESULT-FAILED-EMPTY: +# OUTPUT-ALL-RESULT-FAILED-NEXT: ******************** +# OUTPUT-ALL-RESULT-FAILED-NEXT: ******************** +# OUTPUT-ALL-RESULT-FAILED-NEXT: Failed Tests (1): +# OUTPUT-ALL-RESULT-FAILED-NEXT: verbosity :: fail.txt +# OUTPUT-ALL-RESULT-FAILED-EMPTY: +# OUTPUT-ALL-RESULT-FAILED-NEXT: ******************** +# OUTPUT-ALL-RESULT-FAILED-NEXT: Unexpectedly Passed Tests (1): +# OUTPUT-ALL-RESULT-FAILED-NEXT: verbosity :: xpass.txt +# OUTPUT-ALL-RESULT-FAILED-EMPTY: +# OUTPUT-ALL-RESULT-FAILED-EMPTY: +# OUTPUT-ALL-RESULT-FAILED-NEXT: Testing Time: {{.*}}s +# OUTPUT-ALL-RESULT-FAILED-EMPTY: +# OUTPUT-ALL-RESULT-FAILED-NEXT: Total Discovered Tests: 5 +# OUTPUT-ALL-RESULT-FAILED-NEXT: Unsupported : 1 (20.00%) +# OUTPUT-ALL-RESULT-FAILED-NEXT: Passed : 1 (20.00%) +# OUTPUT-ALL-RESULT-FAILED-NEXT: Expectedly Failed : 1 
(20.00%) +# OUTPUT-ALL-RESULT-FAILED-NEXT: Failed : 1 (20.00%) +# OUTPUT-ALL-RESULT-FAILED-NEXT: Unexpectedly Passed: 1 (20.00%) + + +### Test progress bar and terse summary in isolation + +# RUN: not %{lit} --progress-bar %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix PROGRESS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# PROGRESS: -- Testing: 5 tests, 1 workers -- +# PROGRESS-NEXT: Testing: +# PROGRESS-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# PROGRESS-NEXT: Testing: 0.. +# PROGRESS-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# PROGRESS-NEXT: Testing: 0.. 10.. +# PROGRESS-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# PROGRESS-NEXT: Testing: 0.. 10.. 20.. +# PROGRESS-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# PROGRESS-NEXT: Testing: 0.. 10.. 20.. 30.. +# PROGRESS-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# PROGRESS-NEXT: Testing: 0.. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90.. 
+# PROGRESS-NEXT: ******************** +# PROGRESS-NEXT: Failed Tests (1): +# PROGRESS-NEXT: verbosity :: fail.txt +# PROGRESS-EMPTY: +# PROGRESS-NEXT: ******************** +# PROGRESS-NEXT: Unexpectedly Passed Tests (1): +# PROGRESS-NEXT: verbosity :: xpass.txt +# PROGRESS-EMPTY: +# PROGRESS-EMPTY: +# PROGRESS-NEXT: Testing Time: {{.*}}s +# PROGRESS-EMPTY: +# PROGRESS-NEXT: Total Discovered Tests: 5 +# PROGRESS-NEXT: Unsupported : 1 (20.00%) +# PROGRESS-NEXT: Passed : 1 (20.00%) +# PROGRESS-NEXT: Expectedly Failed : 1 (20.00%) +# PROGRESS-NEXT: Failed : 1 (20.00%) +# PROGRESS-NEXT: Unexpectedly Passed: 1 (20.00%) + +# RUN: not %{lit} --terse-summary %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix TERSE < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# TERSE: -- Testing: 5 tests, 1 workers -- +# TERSE-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# TERSE-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# TERSE-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# TERSE-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# TERSE-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# TERSE-NEXT: ******************** +# TERSE-NEXT: Failed Tests (1): +# TERSE-NEXT: verbosity :: fail.txt +# TERSE-EMPTY: +# TERSE-NEXT: ******************** +# TERSE-NEXT: Unexpectedly Passed Tests (1): +# TERSE-NEXT: verbosity :: xpass.txt +# TERSE-EMPTY: +# TERSE-EMPTY: +# TERSE-NEXT: Total Discovered Tests: 5 +# TERSE-NEXT: Failed : 1 (20.00%) +# TERSE-NEXT: Unexpectedly Passed: 1 (20.00%) + + +### Aliases in combination + +# RUN: not %{lit} -a -s %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix AS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# AS: -- Testing: 5 tests, 1 workers -- +# AS-NEXT: Testing: +# AS-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# AS-NEXT: ******************** TEST 
'verbosity :: fail.txt' FAILED ******************** +# AS-NEXT: Exit Code: 127 +# AS-EMPTY: +# AS-NEXT: Command Output (stdout): +# AS-NEXT: -- +# AS-NEXT: # {{R}}UN: at line 1 +# AS-NEXT: echo "fail test output" +# AS-NEXT: # executed command: echo 'fail test output' +# AS-NEXT: # .---command stdout------------ +# AS-NEXT: # | fail test output +# AS-NEXT: # `----------------------------- +# AS-NEXT: # {{R}}UN: at line 2 +# AS-NEXT: fail +# AS-NEXT: # executed command: fail +# AS-NEXT: # .---command stderr------------ +# AS-NEXT: # | 'fail': command not found +# AS-NEXT: # `----------------------------- +# AS-NEXT: # error: command failed with exit status: 127 +# AS-EMPTY: +# AS-NEXT: -- +# AS-EMPTY: +# AS-NEXT: ******************** +# AS-NEXT: Testing: 0.. 10.. +# AS-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# AS-NEXT: ******************** TEST 'verbosity :: xpass.txt' FAILED ******************** +# AS-NEXT: Exit Code: 0 +# AS-EMPTY: +# AS-NEXT: Command Output (stdout): +# AS-NEXT: -- +# AS-NEXT: # {{R}}UN: at line 2 +# AS-NEXT: echo "xpass test output" +# AS-NEXT: # executed command: echo 'xpass test output' +# AS-NEXT: # .---command stdout------------ +# AS-NEXT: # | xpass test output +# AS-NEXT: # `----------------------------- +# AS-EMPTY: +# AS-NEXT: -- +# AS-EMPTY: +# AS-NEXT: ******************** +# AS-NEXT: Testing: 0.. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90.. 
+# AS-NEXT: ******************** +# AS-NEXT: Failed Tests (1): +# AS-NEXT: verbosity :: fail.txt +# AS-EMPTY: +# AS-NEXT: ******************** +# AS-NEXT: Unexpectedly Passed Tests (1): +# AS-NEXT: verbosity :: xpass.txt +# AS-EMPTY: +# AS-EMPTY: +# AS-NEXT: Testing Time: {{.*}}s +# AS-EMPTY: +# AS-NEXT: Total Discovered Tests: 5 +# AS-NEXT: Unsupported : 1 (20.00%) +# AS-NEXT: Passed : 1 (20.00%) +# AS-NEXT: Expectedly Failed : 1 (20.00%) +# AS-NEXT: Failed : 1 (20.00%) +# AS-NEXT: Unexpectedly Passed: 1 (20.00%) + + +# RUN: not %{lit} -s -a %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix SA < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# SA: -- Testing: 5 tests, 1 workers -- +# SA-NEXT: Testing: +# SA-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# SA-NEXT: ******************** TEST 'verbosity :: fail.txt' FAILED ******************** +# SA-NEXT: Exit Code: 127 +# SA-EMPTY: +# SA-NEXT: Command Output (stdout): +# SA-NEXT: -- +# SA-NEXT: # {{R}}UN: at line 1 +# SA-NEXT: echo "fail test output" +# SA-NEXT: # executed command: echo 'fail test output' +# SA-NEXT: # .---command stdout------------ +# SA-NEXT: # | fail test output +# SA-NEXT: # `----------------------------- +# SA-NEXT: # {{R}}UN: at line 2 +# SA-NEXT: fail +# SA-NEXT: # executed command: fail +# SA-NEXT: # .---command stderr------------ +# SA-NEXT: # | 'fail': command not found +# SA-NEXT: # `----------------------------- +# SA-NEXT: # error: command failed with exit status: 127 +# SA-EMPTY: +# SA-NEXT: -- +# SA-EMPTY: +# SA-NEXT: ******************** +# SA-NEXT: Testing: 0.. 10.. 
+# SA-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# SA-NEXT: Exit Code: 0 +# SA-EMPTY: +# SA-NEXT: Command Output (stdout): +# SA-NEXT: -- +# SA-NEXT: # {{R}}UN: at line 1 +# SA-NEXT: echo "pass test output" +# SA-NEXT: # executed command: echo 'pass test output' +# SA-NEXT: # .---command stdout------------ +# SA-NEXT: # | pass test output +# SA-NEXT: # `----------------------------- +# SA-EMPTY: +# SA-NEXT: -- +# SA-EMPTY: +# SA-NEXT: ******************** +# SA-NEXT: Testing: 0.. 10.. 20.. +# SA-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# SA-NEXT: Test requires the following unavailable features: asdf +# SA-NEXT: ******************** +# SA-NEXT: Testing: 0.. 10.. 20.. 30.. +# SA-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# SA-NEXT: Exit Code: 1 +# SA-EMPTY: +# SA-NEXT: Command Output (stdout): +# SA-NEXT: -- +# SA-NEXT: # {{R}}UN: at line 2 +# SA-NEXT: not echo "xfail test output" +# SA-NEXT: # executed command: not echo 'xfail test output' +# SA-NEXT: # .---command stdout------------ +# SA-NEXT: # | xfail test output +# SA-NEXT: # `----------------------------- +# SA-NEXT: # error: command failed with exit status: 1 +# SA-EMPTY: +# SA-NEXT: -- +# SA-EMPTY: +# SA-NEXT: ******************** +# SA-NEXT: Testing: 0.. 10.. 20.. 30.. 40.. +# SA-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# SA-NEXT: ******************** TEST 'verbosity :: xpass.txt' FAILED ******************** +# SA-NEXT: Exit Code: 0 +# SA-EMPTY: +# SA-NEXT: Command Output (stdout): +# SA-NEXT: -- +# SA-NEXT: # {{R}}UN: at line 2 +# SA-NEXT: echo "xpass test output" +# SA-NEXT: # executed command: echo 'xpass test output' +# SA-NEXT: # .---command stdout------------ +# SA-NEXT: # | xpass test output +# SA-NEXT: # `----------------------------- +# SA-EMPTY: +# SA-NEXT: -- +# SA-EMPTY: +# SA-NEXT: ******************** +# SA-NEXT: Testing: 0.. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90.. 
+# SA-NEXT: ******************** +# SA-NEXT: Failed Tests (1): +# SA-NEXT: verbosity :: fail.txt +# SA-EMPTY: +# SA-NEXT: ******************** +# SA-NEXT: Unexpectedly Passed Tests (1): +# SA-NEXT: verbosity :: xpass.txt +# SA-EMPTY: +# SA-EMPTY: +# SA-NEXT: Testing Time: {{.*}}s +# SA-EMPTY: +# SA-NEXT: Total Discovered Tests: 5 +# SA-NEXT: Unsupported : 1 (20.00%) +# SA-NEXT: Passed : 1 (20.00%) +# SA-NEXT: Expectedly Failed : 1 (20.00%) +# SA-NEXT: Failed : 1 (20.00%) +# SA-NEXT: Unexpectedly Passed: 1 (20.00%) + + +# RUN: not %{lit} -q -a %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix QA < %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET-ERR --implicit-check-not lit < %t/stderr.txt + +# QA: -- Testing: 5 tests, 1 workers -- +# QA-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# QA-NEXT: ******************** TEST 'verbosity :: fail.txt' FAILED ******************** +# QA-NEXT: Exit Code: 127 +# QA-EMPTY: +# QA-NEXT: Command Output (stdout): +# QA-NEXT: -- +# QA-NEXT: # {{R}}UN: at line 1 +# QA-NEXT: echo "fail test output" +# QA-NEXT: # executed command: echo 'fail test output' +# QA-NEXT: # .---command stdout------------ +# QA-NEXT: # | fail test output +# QA-NEXT: # `----------------------------- +# QA-NEXT: # {{R}}UN: at line 2 +# QA-NEXT: fail +# QA-NEXT: # executed command: fail +# QA-NEXT: # .---command stderr------------ +# QA-NEXT: # | 'fail': command not found +# QA-NEXT: # `----------------------------- +# QA-NEXT: # error: command failed with exit status: 127 +# QA-EMPTY: +# QA-NEXT: -- +# QA-EMPTY: +# QA-NEXT: ******************** +# QA-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# QA-NEXT: Exit Code: 0 +# QA-EMPTY: +# QA-NEXT: Command Output (stdout): +# QA-NEXT: -- +# QA-NEXT: # {{R}}UN: at line 1 +# QA-NEXT: echo "pass test output" +# QA-NEXT: # executed command: echo 'pass test output' +# QA-NEXT: # .---command stdout------------ +# QA-NEXT: # | pass test output +# QA-NEXT: # 
`----------------------------- +# QA-EMPTY: +# QA-NEXT: -- +# QA-EMPTY: +# QA-NEXT: ******************** +# QA-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# QA-NEXT: Test requires the following unavailable features: asdf +# QA-NEXT: ******************** +# QA-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# QA-NEXT: Exit Code: 1 +# QA-EMPTY: +# QA-NEXT: Command Output (stdout): +# QA-NEXT: -- +# QA-NEXT: # {{R}}UN: at line 2 +# QA-NEXT: not echo "xfail test output" +# QA-NEXT: # executed command: not echo 'xfail test output' +# QA-NEXT: # .---command stdout------------ +# QA-NEXT: # | xfail test output +# QA-NEXT: # `----------------------------- +# QA-NEXT: # error: command failed with exit status: 1 +# QA-EMPTY: +# QA-NEXT: -- +# QA-EMPTY: +# QA-NEXT: ******************** +# QA-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# QA-NEXT: ******************** TEST 'verbosity :: xpass.txt' FAILED ******************** +# QA-NEXT: Exit Code: 0 +# QA-EMPTY: +# QA-NEXT: Command Output (stdout): +# QA-NEXT: -- +# QA-NEXT: # {{R}}UN: at line 2 +# QA-NEXT: echo "xpass test output" +# QA-NEXT: # executed command: echo 'xpass test output' +# QA-NEXT: # .---command stdout------------ +# QA-NEXT: # | xpass test output +# QA-NEXT: # `----------------------------- +# QA-EMPTY: +# QA-NEXT: -- +# QA-EMPTY: +# QA-NEXT: ******************** +# QA-NEXT: ******************** +# QA-NEXT: Failed Tests (1): +# QA-NEXT: verbosity :: fail.txt +# QA-EMPTY: +# QA-NEXT: ******************** +# QA-NEXT: Unexpectedly Passed Tests (1): +# QA-NEXT: verbosity :: xpass.txt +# QA-EMPTY: +# QA-EMPTY: +# QA-NEXT: Total Discovered Tests: 5 +# QA-NEXT: Failed : 1 (20.00%) +# QA-NEXT: Unexpectedly Passed: 1 (20.00%) + +# RUN: not %{lit} -a -q %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET < %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET-ERR --implicit-check-not lit < %t/stderr.txt + +# RUN: not %{lit} -sqav %{inputs}/verbosity 2> 
%t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix SQAV < %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET-ERR --implicit-check-not lit < %t/stderr.txt + +# SQAV: -- Testing: 5 tests, 1 workers -- +# SQAV-NEXT: Testing: +# SQAV-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# SQAV-NEXT: ******************** TEST 'verbosity :: fail.txt' FAILED ******************** +# SQAV-NEXT: Exit Code: 127 +# SQAV-EMPTY: +# SQAV-NEXT: Command Output (stdout): +# SQAV-NEXT: -- +# SQAV-NEXT: # {{R}}UN: at line 1 +# SQAV-NEXT: echo "fail test output" +# SQAV-NEXT: # executed command: echo 'fail test output' +# SQAV-NEXT: # .---command stdout------------ +# SQAV-NEXT: # | fail test output +# SQAV-NEXT: # `----------------------------- +# SQAV-NEXT: # {{R}}UN: at line 2 +# SQAV-NEXT: fail +# SQAV-NEXT: # executed command: fail +# SQAV-NEXT: # .---command stderr------------ +# SQAV-NEXT: # | 'fail': command not found +# SQAV-NEXT: # `----------------------------- +# SQAV-NEXT: # error: command failed with exit status: 127 +# SQAV-EMPTY: +# SQAV-NEXT: -- +# SQAV-EMPTY: +# SQAV-NEXT: ******************** +# SQAV-NEXT: Testing: 0.. 10.. +# SQAV-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# SQAV-NEXT: Testing: 0.. 10.. 20.. +# SQAV-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# SQAV-NEXT: Testing: 0.. 10.. 20.. 30.. +# SQAV-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# SQAV-NEXT: Testing: 0.. 10.. 20.. 30.. 40.. 
+# SQAV-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# SQAV-NEXT: ******************** TEST 'verbosity :: xpass.txt' FAILED ******************** +# SQAV-NEXT: Exit Code: 0 +# SQAV-EMPTY: +# SQAV-NEXT: Command Output (stdout): +# SQAV-NEXT: -- +# SQAV-NEXT: # {{R}}UN: at line 2 +# SQAV-NEXT: echo "xpass test output" +# SQAV-NEXT: # executed command: echo 'xpass test output' +# SQAV-NEXT: # .---command stdout------------ +# SQAV-NEXT: # | xpass test output +# SQAV-NEXT: # `----------------------------- +# SQAV-EMPTY: +# SQAV-NEXT: -- +# SQAV-EMPTY: +# SQAV-NEXT: ******************** +# SQAV-NEXT: Testing: 0.. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90.. +# SQAV-NEXT: ******************** +# SQAV-NEXT: Failed Tests (1): +# SQAV-NEXT: verbosity :: fail.txt +# SQAV-EMPTY: +# SQAV-NEXT: ******************** +# SQAV-NEXT: Unexpectedly Passed Tests (1): +# SQAV-NEXT: verbosity :: xpass.txt +# SQAV-EMPTY: +# SQAV-EMPTY: +# SQAV-NEXT: Total Discovered Tests: 5 +# SQAV-NEXT: Failed : 1 (20.00%) +# SQAV-NEXT: Unexpectedly Passed: 1 (20.00%) + + +### Aliases with specific overrides + +# RUN: not %{lit} --quiet --no-terse-summary %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET-W-SUMMARY < %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET-ERR --implicit-check-not lit < %t/stderr.txt + +# QUIET-W-SUMMARY: -- Testing: 5 tests, 1 workers -- +# QUIET-W-SUMMARY-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# QUIET-W-SUMMARY-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# QUIET-W-SUMMARY-NEXT: ******************** +# QUIET-W-SUMMARY-NEXT: Failed Tests (1): +# QUIET-W-SUMMARY-NEXT: verbosity :: fail.txt +# QUIET-W-SUMMARY-EMPTY: +# QUIET-W-SUMMARY-NEXT: ******************** +# QUIET-W-SUMMARY-NEXT: Unexpectedly Passed Tests (1): +# QUIET-W-SUMMARY-NEXT: verbosity :: xpass.txt +# QUIET-W-SUMMARY-EMPTY: +# QUIET-W-SUMMARY-EMPTY: +# QUIET-W-SUMMARY-NEXT: Testing Time: {{.*}}s +# QUIET-W-SUMMARY-EMPTY: +# QUIET-W-SUMMARY-NEXT: Total 
Discovered Tests: 5 +# QUIET-W-SUMMARY-NEXT: Unsupported : 1 (20.00%) +# QUIET-W-SUMMARY-NEXT: Passed : 1 (20.00%) +# QUIET-W-SUMMARY-NEXT: Expectedly Failed : 1 (20.00%) +# QUIET-W-SUMMARY-NEXT: Failed : 1 (20.00%) +# QUIET-W-SUMMARY-NEXT: Unexpectedly Passed: 1 (20.00%) + + +# RUN: not %{lit} --quiet --progress-bar %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET-W-PROGRESS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET-ERR --implicit-check-not lit < %t/stderr.txt + +# QUIET-W-PROGRESS: -- Testing: 5 tests, 1 workers -- +# QUIET-W-PROGRESS-NEXT: Testing: +# QUIET-W-PROGRESS-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# QUIET-W-PROGRESS-NEXT: Testing: 0.. 10.. +# QUIET-W-PROGRESS-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# QUIET-W-PROGRESS-NEXT: Testing: 0.. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90.. +# QUIET-W-PROGRESS-NEXT: ******************** +# QUIET-W-PROGRESS-NEXT: Failed Tests (1): +# QUIET-W-PROGRESS-NEXT: verbosity :: fail.txt +# QUIET-W-PROGRESS-EMPTY: +# QUIET-W-PROGRESS-NEXT: ******************** +# QUIET-W-PROGRESS-NEXT: Unexpectedly Passed Tests (1): +# QUIET-W-PROGRESS-NEXT: verbosity :: xpass.txt +# QUIET-W-PROGRESS-EMPTY: +# QUIET-W-PROGRESS-EMPTY: +# QUIET-W-PROGRESS-NEXT: Total Discovered Tests: 5 +# QUIET-W-PROGRESS-NEXT: Failed : 1 (20.00%) +# QUIET-W-PROGRESS-NEXT: Unexpectedly Passed: 1 (20.00%) + +# RUN: not %{lit} --show-all --terse-summary %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix ALL-TERSE < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# ALL-TERSE: -- Testing: 5 tests, 1 workers -- +# ALL-TERSE-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# ALL-TERSE-NEXT: ******************** TEST 'verbosity :: fail.txt' FAILED ******************** +# ALL-TERSE-NEXT: Exit Code: 127 +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: Command Output (stdout): +# ALL-TERSE-NEXT: -- +# ALL-TERSE-NEXT: 
# {{R}}UN: at line 1 +# ALL-TERSE-NEXT: echo "fail test output" +# ALL-TERSE-NEXT: # executed command: echo 'fail test output' +# ALL-TERSE-NEXT: # .---command stdout------------ +# ALL-TERSE-NEXT: # | fail test output +# ALL-TERSE-NEXT: # `----------------------------- +# ALL-TERSE-NEXT: # {{R}}UN: at line 2 +# ALL-TERSE-NEXT: fail +# ALL-TERSE-NEXT: # executed command: fail +# ALL-TERSE-NEXT: # .---command stderr------------ +# ALL-TERSE-NEXT: # | 'fail': command not found +# ALL-TERSE-NEXT: # `----------------------------- +# ALL-TERSE-NEXT: # error: command failed with exit status: 127 +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: -- +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: ******************** +# ALL-TERSE-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# ALL-TERSE-NEXT: Exit Code: 0 +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: Command Output (stdout): +# ALL-TERSE-NEXT: -- +# ALL-TERSE-NEXT: # {{R}}UN: at line 1 +# ALL-TERSE-NEXT: echo "pass test output" +# ALL-TERSE-NEXT: # executed command: echo 'pass test output' +# ALL-TERSE-NEXT: # .---command stdout------------ +# ALL-TERSE-NEXT: # | pass test output +# ALL-TERSE-NEXT: # `----------------------------- +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: -- +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: ******************** +# ALL-TERSE-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# ALL-TERSE-NEXT: Test requires the following unavailable features: asdf +# ALL-TERSE-NEXT: ******************** +# ALL-TERSE-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# ALL-TERSE-NEXT: Exit Code: 1 +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: Command Output (stdout): +# ALL-TERSE-NEXT: -- +# ALL-TERSE-NEXT: # {{R}}UN: at line 2 +# ALL-TERSE-NEXT: not echo "xfail test output" +# ALL-TERSE-NEXT: # executed command: not echo 'xfail test output' +# ALL-TERSE-NEXT: # .---command stdout------------ +# ALL-TERSE-NEXT: # | xfail test output +# ALL-TERSE-NEXT: # `----------------------------- +# ALL-TERSE-NEXT: # error: command failed with exit status: 1 +# 
ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: -- +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: ******************** +# ALL-TERSE-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# ALL-TERSE-NEXT: ******************** TEST 'verbosity :: xpass.txt' FAILED ******************** +# ALL-TERSE-NEXT: Exit Code: 0 +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: Command Output (stdout): +# ALL-TERSE-NEXT: -- +# ALL-TERSE-NEXT: # {{R}}UN: at line 2 +# ALL-TERSE-NEXT: echo "xpass test output" +# ALL-TERSE-NEXT: # executed command: echo 'xpass test output' +# ALL-TERSE-NEXT: # .---command stdout------------ +# ALL-TERSE-NEXT: # | xpass test output +# ALL-TERSE-NEXT: # `----------------------------- +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: -- +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: ******************** +# ALL-TERSE-NEXT: ******************** +# ALL-TERSE-NEXT: Failed Tests (1): +# ALL-TERSE-NEXT: verbosity :: fail.txt +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: ******************** +# ALL-TERSE-NEXT: Unexpectedly Passed Tests (1): +# ALL-TERSE-NEXT: verbosity :: xpass.txt +# ALL-TERSE-EMPTY: +# ALL-TERSE-EMPTY: +# ALL-TERSE-NEXT: Total Discovered Tests: 5 +# ALL-TERSE-NEXT: Failed : 1 (20.00%) +# ALL-TERSE-NEXT: Unexpectedly Passed: 1 (20.00%) + +# RUN: not %{lit} --show-all --diagnostic-level error %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix SHOW-ALL < %t/stdout.txt +# RUN: FileCheck %s --check-prefix QUIET-ERR --implicit-check-not lit < %t/stderr.txt + +# RUN: not %{lit} --show-all --test-output off %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# RUN: not %{lit} --succinct --print-result-after all %{inputs}/verbosity 2> %t/stderr.txt > %t/stdout.txt +# RUN: FileCheck %s --check-prefix SUCCINCT-RESULT-ALL < %t/stdout.txt +# RUN: FileCheck %s --check-prefix NO-ARGS-ERR --implicit-check-not lit < %t/stderr.txt + +# 
SUCCINCT-RESULT-ALL: -- Testing: 5 tests, 1 workers -- +# SUCCINCT-RESULT-ALL-NEXT: Testing: +# SUCCINCT-RESULT-ALL-NEXT: FAIL: verbosity :: fail.txt (1 of 5) +# SUCCINCT-RESULT-ALL-NEXT: Testing: 0.. 10.. +# SUCCINCT-RESULT-ALL-NEXT: PASS: verbosity :: pass.txt (2 of 5) +# SUCCINCT-RESULT-ALL-NEXT: Testing: 0.. 10.. 20.. +# SUCCINCT-RESULT-ALL-NEXT: {{UN}}SUPPORTED: verbosity :: unsupported.txt (3 of 5) +# SUCCINCT-RESULT-ALL-NEXT: Testing: 0.. 10.. 20.. 30.. +# SUCCINCT-RESULT-ALL-NEXT: {{X}}FAIL: verbosity :: xfail.txt (4 of 5) +# SUCCINCT-RESULT-ALL-NEXT: Testing: 0.. 10.. 20.. 30.. 40.. +# SUCCINCT-RESULT-ALL-NEXT: XPASS: verbosity :: xpass.txt (5 of 5) +# SUCCINCT-RESULT-ALL-NEXT: Testing: 0.. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90.. +# SUCCINCT-RESULT-ALL-NEXT: ******************** +# SUCCINCT-RESULT-ALL-NEXT: Failed Tests (1): +# SUCCINCT-RESULT-ALL-NEXT: verbosity :: fail.txt +# SUCCINCT-RESULT-ALL-EMPTY: +# SUCCINCT-RESULT-ALL-NEXT: ******************** +# SUCCINCT-RESULT-ALL-NEXT: Unexpectedly Passed Tests (1): +# SUCCINCT-RESULT-ALL-NEXT: verbosity :: xpass.txt +# SUCCINCT-RESULT-ALL-EMPTY: +# SUCCINCT-RESULT-ALL-EMPTY: +# SUCCINCT-RESULT-ALL-NEXT: Testing Time: {{.*}}s +# SUCCINCT-RESULT-ALL-EMPTY: +# SUCCINCT-RESULT-ALL-NEXT: Total Discovered Tests: 5 +# SUCCINCT-RESULT-ALL-NEXT: Unsupported : 1 (20.00%) +# SUCCINCT-RESULT-ALL-NEXT: Passed : 1 (20.00%) +# SUCCINCT-RESULT-ALL-NEXT: Expectedly Failed : 1 (20.00%) +# SUCCINCT-RESULT-ALL-NEXT: Failed : 1 (20.00%) +# SUCCINCT-RESULT-ALL-NEXT: Unexpectedly Passed: 1 (20.00%)