Skip to content

Commit

Permalink
[run-webkit-tests] Manager shouldn't treat all IOErrors as invalid --test-list paths
Browse files Browse the repository at this point in the history

https://bugs.webkit.org/show_bug.cgi?id=271180

Reviewed by Jonathan Bedard and Darin Adler.

Currently this means all IOErrors are swallowed by Manager, which is
awkward when we have an actual bug (rdar://124918090) here, which should
show up as an uncaught exception rather than silently failing.

* Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder_legacy.py:
(LayoutTestFinder._read_test_names_from_file):
* Tools/Scripts/webkitpy/layout_tests/controllers/manager.py:
(Manager.run):
(Manager.print_expectations):
(Manager.print_summary):

Canonical link: https://commits.webkit.org/276304@main
  • Loading branch information
gsnedders committed Mar 18, 2024
1 parent dc87670 commit f342b3f
Show file tree
Hide file tree
Showing 2 changed files with 31 additions and 28 deletions.
Original file line number Diff line number Diff line change
def _read_test_names_from_file(self, filenames, test_path_separator):
    """Return the list of test names read from the given --test-list files.

    Args:
        filenames: iterable of paths to test-list files.
        test_path_separator: the path separator used inside the list files;
            if it differs from the host filesystem's separator, each path is
            rewritten to use the host separator.

    Returns:
        A flat list of non-empty, comment-stripped lines from all files,
        in file order.

    Raises:
        IOError: if a list file cannot be read.  This is deliberate: callers
            (e.g. Manager) validate the paths up front, so any IOError here
            indicates a real bug and should surface as an uncaught exception
            rather than being silently swallowed (see webkit.org/b/271180).
    """
    fs = self._filesystem
    tests = []
    for filename in filenames:
        # Normalize separators so list files written on one platform can
        # be consumed on another.
        if test_path_separator != fs.sep:
            filename = filename.replace(test_path_separator, fs.sep)
        file_contents = fs.read_text_file(filename).split('\n')
        for line in file_contents:
            line = self._strip_comments(line)
            if line:
                tests.append(line)
    return tests

@staticmethod
Expand Down
39 changes: 24 additions & 15 deletions Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -319,12 +319,15 @@ def _set_up_run(self, test_inputs, device_type):
def run(self, args):
num_failed_uploads = 0

if self._options.test_list:
for list_path in self._options.test_list:
if not self._port.host.filesystem.isfile(list_path):
_log.critical('')
_log.critical('--test-list file "{}" not found'.format(list_path))
return test_run_results.RunDetails(exit_code=-1)

device_type_list = self._port.supported_device_types()
try:
tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
except IOError:
# This is raised if --test-list doesn't exist
return test_run_results.RunDetails(exit_code=-1)
tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)

aggregate_tests_to_run = set() # type: Set[Test]
for v in tests_to_run_by_device.values():
Expand Down Expand Up @@ -748,11 +751,14 @@ def _print_expectations_for_subset(self, device_type, test_col_width, tests_to_r
def print_expectations(self, args):
device_type_list = self._port.supported_device_types()

try:
tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
except IOError:
# This is raised if --test-list doesn't exist
return -1
if self._options.test_list:
for list_path in self._options.test_list:
if not self._port.host.filesystem.isfile(list_path):
_log.critical('')
_log.critical('--test-list file "{}" not found'.format(list_path))
return -1

tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)

aggregate_tests_to_run = set()
for v in tests_to_run_by_device.values():
Expand All @@ -773,11 +779,14 @@ def print_summary(self, args):
device_type_list = self._port.supported_device_types()
test_stats = {}

try:
self._collect_tests(args, device_type_list)
except IOError:
# This is raised if --test-list doesn't exist
return test_run_results.RunDetails(exit_code=-1)
if self._options.test_list:
for list_path in self._options.test_list:
if not self._port.host.filesystem.isfile(list_path):
_log.critical('')
_log.critical('--test-list file "{}" not found'.format(list_path))
return test_run_results.RunDetails(exit_code=-1)

self._collect_tests(args, device_type_list)

for device_type, expectations in self._expectations.items():
test_stats[device_type] = {'__root__': {'count': 0, 'skip': 0, 'pass': 0, 'flaky': 0, 'fail': 0, 'has_tests': False}}
Expand Down

0 comments on commit f342b3f

Please sign in to comment.