diff --git a/llvm/utils/lit/lit/Test.py b/llvm/utils/lit/lit/Test.py
index 4c94e9806a1601..a38ea4e7717a32 100644
--- a/llvm/utils/lit/lit/Test.py
+++ b/llvm/utils/lit/lit/Test.py
@@ -1,6 +1,5 @@
 import itertools
 import os
-from xml.sax.saxutils import quoteattr
 from json import JSONEncoder
 
 from lit.BooleanExpression import BooleanExpression
@@ -10,35 +9,50 @@ class ResultCode(object):
     """Test result codes."""
 
+    # All result codes (including user-defined ones) in declaration order
+    _all_codes = []
+
+    @staticmethod
+    def all_codes():
+        return ResultCode._all_codes
+
     # We override __new__ and __getnewargs__ to ensure that pickling still
     # provides unique ResultCode objects in any particular instance.
     _instances = {}
-    def __new__(cls, name, isFailure):
+
+    def __new__(cls, name, label, isFailure):
         res = cls._instances.get(name)
         if res is None:
             cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
         return res
+
     def __getnewargs__(self):
-        return (self.name, self.isFailure)
+        return (self.name, self.label, self.isFailure)
 
-    def __init__(self, name, isFailure):
+    def __init__(self, name, label, isFailure):
         self.name = name
+        self.label = label
         self.isFailure = isFailure
+        ResultCode._all_codes.append(self)
 
     def __repr__(self):
         return '%s%r' % (self.__class__.__name__,
                          (self.name, self.isFailure))
 
 
-PASS = ResultCode('PASS', False)
-FLAKYPASS = ResultCode('FLAKYPASS', False)
-XFAIL = ResultCode('XFAIL', False)
-FAIL = ResultCode('FAIL', True)
-XPASS = ResultCode('XPASS', True)
-UNRESOLVED = ResultCode('UNRESOLVED', True)
-UNSUPPORTED = ResultCode('UNSUPPORTED', False)
-TIMEOUT = ResultCode('TIMEOUT', True)
-SKIPPED = ResultCode('SKIPPED', False)
-EXCLUDED = ResultCode('EXCLUDED', False)
+
+# Successes
+EXCLUDED = ResultCode('EXCLUDED', 'Excluded', False)
+SKIPPED = ResultCode('SKIPPED', 'Skipped', False)
+UNSUPPORTED = ResultCode('UNSUPPORTED', 'Unsupported', False)
+PASS = ResultCode('PASS', 'Passed', False)
+FLAKYPASS = ResultCode('FLAKYPASS', 'Passed With Retry', False)
+XFAIL = ResultCode('XFAIL', 'Expectedly Failed', False)
+# Failures
+UNRESOLVED = ResultCode('UNRESOLVED', 'Unresolved', True)
+TIMEOUT = ResultCode('TIMEOUT', 'Timed Out', True)
+FAIL = ResultCode('FAIL', 'Failed', True)
+XPASS = ResultCode('XPASS', 'Unexpectedly Passed', True)
+
 
 
 # Test metric values.
diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py
index e1a6f226096500..860c584fbdf49f 100755
--- a/llvm/utils/lit/lit/main.py
+++ b/llvm/utils/lit/lit/main.py
@@ -259,46 +259,24 @@ def print_histogram(tests):
         lit.util.printHistogram(test_times, title='Tests')
 
 
-def add_result_category(result_code, label):
-    assert isinstance(result_code, lit.Test.ResultCode)
-    category = (result_code, label)
-    result_codes.append(category)
-
-
-result_codes = [
-    # Passes
-    (lit.Test.EXCLUDED, 'Excluded'),
-    (lit.Test.SKIPPED, 'Skipped'),
-    (lit.Test.UNSUPPORTED, 'Unsupported'),
-    (lit.Test.PASS, 'Passed'),
-    (lit.Test.FLAKYPASS, 'Passed With Retry'),
-    (lit.Test.XFAIL, 'Expectedly Failed'),
-    # Failures
-    (lit.Test.UNRESOLVED, 'Unresolved'),
-    (lit.Test.TIMEOUT, 'Timed Out'),
-    (lit.Test.FAIL, 'Failed'),
-    (lit.Test.XPASS, 'Unexpectedly Passed')
-]
-
-
 def print_results(tests, elapsed, opts):
-    tests_by_code = {code: [] for code, _ in result_codes}
+    tests_by_code = {code: [] for code in lit.Test.ResultCode.all_codes()}
     for test in tests:
         tests_by_code[test.result.code].append(test)
 
-    for (code, label) in result_codes:
-        print_group(code, label, tests_by_code[code], opts.show_results)
+    for code in lit.Test.ResultCode.all_codes():
+        print_group(tests_by_code[code], code, opts.show_results)
 
     print_summary(tests_by_code, opts.quiet, elapsed)
 
 
-def print_group(code, label, tests, show_results):
+def print_group(tests, code, show_results):
     if not tests:
         return
     if not code.isFailure and code not in show_results:
         return
     print('*' * 20)
-    print('%s Tests (%d):' % (label, len(tests)))
+    print('{} Tests ({}):'.format(code.label, len(tests)))
     for test in tests:
         print('    %s' % test.getFullName())
     sys.stdout.write('\n')
@@ -308,8 +286,9 @@ def print_summary(tests_by_code, quiet, elapsed):
     if not quiet:
         print('\nTesting Time: %.2fs' % elapsed)
 
-    codes = [c for c in result_codes if not quiet or c.isFailure]
-    groups = [(label, len(tests_by_code[code])) for code, label in codes]
+    codes = [c for c in lit.Test.ResultCode.all_codes()
+             if not quiet or c.isFailure]
+    groups = [(c.label, len(tests_by_code[c])) for c in codes]
     groups = [(label, count) for label, count in groups if count]
     if not groups:
         return
diff --git a/llvm/utils/lit/tests/Inputs/custom-result-category/format.py b/llvm/utils/lit/tests/Inputs/custom-result-category/format.py
index b0c97ec71bb9a8..0ef1bf24f833e3 100644
--- a/llvm/utils/lit/tests/Inputs/custom-result-category/format.py
+++ b/llvm/utils/lit/tests/Inputs/custom-result-category/format.py
@@ -1,11 +1,8 @@
 import lit
 import lit.formats
 
-CUSTOM_PASS = lit.Test.ResultCode('CUSTOM_PASS', False)
-CUSTOM_FAILURE = lit.Test.ResultCode('CUSTOM_FAILURE', True)
-
-lit.main.add_result_category(CUSTOM_PASS, "My Passed")
-lit.main.add_result_category(CUSTOM_FAILURE, "My Failed")
+CUSTOM_PASS = lit.Test.ResultCode('CUSTOM_PASS', 'My Passed', False)
+CUSTOM_FAILURE = lit.Test.ResultCode('CUSTOM_FAILURE', 'My Failed', True)
 
 
 class MyFormat(lit.formats.ShTest):