Update chromium logger to identify subtest failures. #21763

Merged
17 changes: 12 additions & 5 deletions tools/wptrunner/wptrunner/formatters/chromium.py
@@ -55,13 +55,14 @@ def _append_test_message(self, test, subtest, status, expected, message):
prefix += "%s: " % subtest
self.messages[test] += prefix + message + "\n"

def _store_test_result(self, name, actual, expected, message):
def _store_test_result(self, name, actual, expected, message, subtest_failure=False):
"""
Stores the result of a single test in |self.tests|
:param str name: name of the test.
:param str actual: actual status of the test.
:param str expected: expected statuses of the test.
:param str message: test output, such as status, subtest, errors etc.
:param bool subtest_failure: whether this test failed because of subtests
"""
# The test name can contain a leading / which will produce an empty
# string in the first position of the list returned by split. We use
@@ -72,8 +73,12 @@ def _store_test_result(self, name, actual, expected, message):
cur_dict = cur_dict.setdefault(name_part, {})
cur_dict["actual"] = actual
cur_dict["expected"] = expected
if message != "":
cur_dict["artifacts"] = {"log": message}
if subtest_failure or message:
cur_dict["artifacts"] = {"log": ""}
if subtest_failure:
cur_dict["artifacts"]["log"] += "subtest_failure\n"
if message != "":
cur_dict["artifacts"]["log"] += message

# Figure out if there was a regression or unexpected status. This only
# happens for tests that were run
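With this change, _store_test_result seeds artifacts["log"] with an empty string, prepends a "subtest_failure" marker line when the test failed because of its subtests, and then appends any accumulated subtest messages. A minimal sketch of the resulting per-test entry, using the t1 fixture names from the tests further down (other keys the formatter may set, such as regression flags, are omitted):

    entry = {
        "actual": "FAIL",        # flipped from PASS because a subtest failed unexpectedly
        "expected": "PASS",
        "artifacts": {
            "log": "subtest_failure\n"
                   "[FAIL expected PASS] t1_a: t1_a_message\n",
        },
    }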
@@ -157,17 +162,19 @@ def test_status(self, data):
def test_end(self, data):
test_name = data["test"]
actual_status = self._map_status_name(data["status"])
expected_statuses = self._get_expected_status_from_data(actual_status, data)
subtest_failure = False
if actual_status == "PASS" and test_name in self.tests_with_subtest_fails:
# This test passed but it has failing subtests, so we flip the status
# to FAIL.
actual_status = "FAIL"
subtest_failure = True
# Clean up the test list to avoid accumulating too many.
self.tests_with_subtest_fails.remove(test_name)

expected_statuses = self._get_expected_status_from_data(actual_status, data)
if "message" in data:
self._append_test_message(test_name, None, actual_status, expected_statuses, data["message"])
self._store_test_result(test_name, actual_status, expected_statuses, self.messages[test_name])
self._store_test_result(test_name, actual_status, expected_statuses, self.messages[test_name], subtest_failure)

# Remove the test from messages dict to avoid accumulating too many.
self.messages.pop(test_name)
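The "subtest_failure" marker lets consumers of the Chromium JSON results tell a FAIL caused by unexpected subtest statuses apart from a FAIL reported by the harness itself. A rough sketch of such a check; the helper name is_subtest_failure is hypothetical and not part of this change:

    def is_subtest_failure(result_entry):
        # result_entry is one leaf of output_json["tests"], e.g. output_json["tests"]["t1"].
        log = result_entry.get("artifacts", {}).get("log", "")
        return result_entry.get("actual") == "FAIL" and log.startswith("subtest_failure\n")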
12 changes: 9 additions & 3 deletions tools/wptrunner/wptrunner/formatters/tests/test_chromium.py
@@ -157,7 +157,8 @@ def test_subtest_messages(capfd):
output_json = json.load(output)

t1_log = output_json["tests"]["t1"]["artifacts"]["log"]
assert t1_log == "[FAIL expected PASS] t1_a: t1_a_message\n" \
assert t1_log == "subtest_failure\n" \
"[FAIL expected PASS] t1_a: t1_a_message\n" \
"[PASS] t1_b: t1_b_message\n"

t2_log = output_json["tests"]["t2"]["artifacts"]["log"]
@@ -203,12 +204,14 @@ def test_subtest_failure(capfd):

test_obj = output_json["tests"]["t1"]
t1_log = test_obj["artifacts"]["log"]
assert t1_log == "[FAIL expected PASS] t1_a: t1_a_message\n" \
assert t1_log == "subtest_failure\n" \
"[FAIL expected PASS] t1_a: t1_a_message\n" \
"[PASS] t1_b: t1_b_message\n" \
"[TIMEOUT expected PASS] t1_c: t1_c_message\n"
# The status of the test in the output is a failure because subtests failed,
# despite the harness reporting that the test passed.
assert test_obj["actual"] == "FAIL"
assert test_obj["expected"] == "PASS"
# Also ensure that the formatter cleaned up its internal state
assert "t1" not in formatter.tests_with_subtest_fails

@@ -259,6 +262,7 @@ def test_expected_subtest_failure(capfd):
# The status of the test in the output is a pass because the subtest
# failures were all expected.
assert test_obj["actual"] == "PASS"
assert test_obj["expected"] == "PASS"


def test_unexpected_subtest_pass(capfd):
@@ -297,9 +301,11 @@ def test_unexpected_subtest_pass(capfd):

test_obj = output_json["tests"]["t1"]
t1_log = test_obj["artifacts"]["log"]
assert t1_log == "[PASS expected FAIL] t1_a: t1_a_message\n"
assert t1_log == "subtest_failure\n" \
"[PASS expected FAIL] t1_a: t1_a_message\n"
# Since the subtest status is unexpected, we fail the test.
assert test_obj["actual"] == "FAIL"
assert test_obj["expected"] == "PASS"
# Also ensure that the formatter cleaned up its internal state
assert "t1" not in formatter.tests_with_subtest_fails

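For reference, the assertions in test_unexpected_subtest_pass above imply a per-test entry along these lines (a sketch collecting the asserted values in one place, not literal formatter output; additional keys may be present):

    # What the updated formatter is expected to emit under output_json["tests"]["t1"]:
    expected_t1 = {
        "actual": "FAIL",      # the unexpected subtest status fails the whole test
        "expected": "PASS",
        "artifacts": {
            "log": "subtest_failure\n"
                   "[PASS expected FAIL] t1_a: t1_a_message\n",
        },
    }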