[Dev][Util] Have unittest respect the --start-offset parameter when used alongside -l #10277

Merged: 4 commits, merged Jan 20, 2024
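For context, a minimal sketch (not part of this PR) of the workflow the change enables: passing --start-offset together with -l so that only the tests from a given offset onward are listed. The unittest path and the offset value are illustrative assumptions; the tab-separated 'name\tgroup' output format matches the listing code in catch.hpp further down.

# Illustrative sketch only; the binary path and offset value are assumptions, not from the PR.
import subprocess

# Before this PR, '-l' ignored --start-offset; with the change, the listing starts at the offset.
proc = subprocess.run(
    ['build/debug/test/unittest', '-l', '--start-offset', '100'],
    capture_output=True, text=True,
)
# Skip the 'name\tgroup' header line and keep only the test names.
test_names = [line.split('\t')[0] for line in proc.stdout.splitlines()[1:]]
print(f'{len(test_names)} tests listed from offset 100 onward')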
66 changes: 32 additions & 34 deletions scripts/run_tests_one_by_one.py
@@ -4,35 +4,28 @@
import os
import time

no_exit = False
profile = False
assertions = True

for i in range(len(sys.argv)):
if sys.argv[i] == '--no-exit':
no_exit = True
del sys.argv[i]
i -= 1
elif sys.argv[i] == '--profile':
profile = True
del sys.argv[i]
i -= 1
elif sys.argv[i] == '--no-assertions':
assertions = False
del sys.argv[i]
i -= 1

if len(sys.argv) < 2:
print(
"Expected usage: python3 scripts/run_tests_one_by_one.py build/debug/test/unittest [--no-exit] [--profile] [--no-assertions]"
)
exit(1)
unittest_program = sys.argv[1]
extra_args = []
if len(sys.argv) > 2:
extra_args = [sys.argv[2]]
import argparse

parser = argparse.ArgumentParser(description='Run tests one by one with optional flags.')
parser.add_argument('unittest_program', help='Path to the unittest program')
parser.add_argument('--no-exit', action='store_true', help='Do not exit after running tests')
parser.add_argument('--profile', action='store_true', help='Enable profiling')
parser.add_argument('--no-assertions', action='store_false', help='Disable assertions')
parser.add_argument('--time_execution', action='store_true', help='Measure and print the execution time of each test')

args, extra_args = parser.parse_known_args()

if not args.unittest_program:
parser.error('Path to unittest program is required')

# Access the arguments
unittest_program = args.unittest_program
no_exit = args.no_exit
profile = args.profile
assertions = args.no_assertions
time_execution = args.time_execution

# Use the '-l' parameter to output the list of tests to run
proc = subprocess.Popen([unittest_program, '-l'] + extra_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = proc.stdout.read().decode('utf8')
stderr = proc.stderr.read().decode('utf8')
@@ -43,6 +36,7 @@
print(stderr)
exit(1)

# The output is in the format of 'PATH\tGROUP', we're only interested in the PATH portion
test_cases = []
first_line = True
for line in stdout.splitlines():
@@ -72,20 +66,24 @@ def parse_assertions(stdout):
return ""


for test_number in range(test_count):
for test_number, test_case in enumerate(test_cases):
if not profile:
print("[" + str(test_number) + "/" + str(test_count) + "]: " + test_cases[test_number], end="")
print("[" + str(test_number) + "/" + str(test_count) + "]: " + test_case, end="")
start = time.time()
res = subprocess.run([unittest_program, test_cases[test_number]], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
res = subprocess.run([unittest_program, test_case], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = res.stdout.decode('utf8')
stderr = res.stderr.decode('utf8')
end = time.time()

additional_data = ""
if assertions:
print(" (" + parse_assertions(stdout) + ")")
else:
print()
additional_data += " (" + parse_assertions(stdout) + ")"
if args.time_execution:
additional_data += f" (Time: {end - start:.4f} seconds)"

print(additional_data)
if profile:
print(f'{test_cases[test_number]} {end - start}')
print(f'{test_case} {end - start}')
if res.returncode is not None and res.returncode != 0:
print("FAILURE IN RUNNING TEST")
print(
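A note on the argparse rewrite above: parser.parse_known_args() is what lets flags the runner itself does not define, such as --start-offset, fall through into extra_args and be forwarded to the unittest binary on the '-l' call. A minimal standalone sketch of that behaviour follows; the example command line is hypothetical.

# Standalone sketch of parse_known_args(); the argv below is hypothetical.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('unittest_program')
parser.add_argument('--no-exit', action='store_true')

argv = ['build/debug/test/unittest', '--no-exit', '--start-offset', '100']
args, extra_args = parser.parse_known_args(argv)

print(args.unittest_program)  # build/debug/test/unittest
print(args.no_exit)           # True
print(extra_args)             # ['--start-offset', '100'], forwarded to the '-l' listing call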
18 changes: 17 additions & 1 deletion third_party/catch/catch.hpp
@@ -11308,12 +11308,28 @@ namespace Catch {
Catch::cout() << "name\tgroup" << std::endl;

auto matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config );
for( auto const& testCaseInfo : matchedTestCases ) {
auto total_tests_run = matchedTestCases.size();
int start_offset = 0;
int end_offset = total_tests_run;
if (config.startOffset() >= 0) {
start_offset = config.startOffset();
} else if (config.startOffsetPercentage() >= 0) {
start_offset = int((config.startOffsetPercentage() / 100.0) * total_tests_run);
}
auto it = matchedTestCases.begin();
for(int current_test = 0; it != matchedTestCases.end(); current_test++) {
if (current_test < start_offset || current_test >= end_offset) {
// skip this test
it++;
continue;
}
auto &testCaseInfo = *it;
Catch::cout() << testCaseInfo.name << "\t";
if( !testCaseInfo.tags.empty() ) {
Catch::cout() << testCaseInfo.tagsAsString();
}
Catch::cout() << std::endl;
it++;
}
return matchedTestCases.size();
}
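To make the listing change easier to follow, here is a small Python rendering of the offset logic added to the catch.hpp listing loop above: an absolute start offset takes precedence over a percentage-based one, and tests before the computed start (or at/after the end offset) are skipped. Function and parameter names here are illustrative, not DuckDB or Catch API.

# Illustrative Python rendering of the C++ offset logic above (names are assumptions).
def list_tests(test_names, start_offset=-1, start_offset_percentage=-1.0):
    total = len(test_names)
    start = 0
    end = total
    if start_offset >= 0:
        start = start_offset
    elif start_offset_percentage >= 0:
        start = int((start_offset_percentage / 100.0) * total)
    # Keep only the tests in [start, end); everything else is skipped, mirroring the loop above.
    return [name for i, name in enumerate(test_names) if start <= i < end]

# e.g. list_tests(['a', 'b', 'c', 'd'], start_offset_percentage=50) returns ['c', 'd']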