From f01b55f25f54635cd91a9b51e07f06ebb513a8ba Mon Sep 17 00:00:00 2001 From: Adam Williamson Date: Fri, 2 Jun 2023 13:51:09 -0700 Subject: [PATCH] Rewrite summarize_answers to be more accurate The summaries are getting pretty inaccurate in certain cases. Incomplete tests aren't really "missing", but they're treated as such in the summary. Problems with gating.yaml files are counted as failed tests, when they really aren't. The special handling of errors (we kinda count them as both failures and errors) doesn't really make much sense. This rewrites summarize_answers entirely to give more accurate summaries, with logic that's hopefully clearer, and with only one iteration over the answers. It's fairly similar to the javascript code in Bodhi which does much the same work (we have to duplicate this in Bodhi so it can create a single combined summary for the results of multiple greenwave queries, when it has to run more than one query). Signed-off-by: Adam Williamson --- docs/messaging.rst | 4 +- docs/package-specific-policies.rst | 2 +- docs/policies.rst | 2 +- functional-tests/consumers/test_resultsdb.py | 16 +-- functional-tests/consumers/test_waiverdb.py | 4 +- functional-tests/test_api_v1.py | 42 +++---- greenwave/policies.py | 113 +++++++++++-------- greenwave/tests/test_api_v1.py | 21 ++-- greenwave/tests/test_policies.py | 16 +-- greenwave/tests/test_summary.py | 36 +++--- 10 files changed, 140 insertions(+), 116 deletions(-) diff --git a/docs/messaging.rst b/docs/messaging.rst index a4e7ba59..faf93ba2 100644 --- a/docs/messaging.rst +++ b/docs/messaging.rst @@ -37,7 +37,7 @@ new test result message from ResultsDB. "applicable_policies": ["osci_compose_modules"], "policies_satisfied": false, - "summary": "1 of 2 required test results missing", + "summary": "Of 2 required test(s), 1 result(s) missing", "satisfied_requirements": [{ "result_id": 7483048, "testcase": "osci.redhat-module.installability.functional", @@ -58,7 +58,7 @@ new test result message from ResultsDB. "previous": { "applicable_policies": ["osci_compose_modules"], "policies_satisfied": false, - "summary": "1 of 2 required tests failed", + "summary": "Of 2 required test(s), 1 test(s) failed", "satisfied_requirements": [{ "result_id": 7483048, "testcase": "osci.redhat-module.installability.functional", diff --git a/docs/package-specific-policies.rst b/docs/package-specific-policies.rst index 5b7a7a67..5776db6d 100644 --- a/docs/package-specific-policies.rst +++ b/docs/package-specific-policies.rst @@ -98,7 +98,7 @@ For such policy, missing remote rule file could result in the following decision "applicable_policies": ["some_policy"], "policies_satisfied": false, "satisfied_requirements": [] - "summary": "1 of 1 required tests failed", + "summary": "Of 1 required test(s), 1 test(s) failed", "unsatisfied_requirements": [{ "subject_identifier": "nethack-1.2.3-1.f31", "subject_type": "koji_build", diff --git a/docs/policies.rst b/docs/policies.rst index 060d71bd..56b9da61 100644 --- a/docs/policies.rst +++ b/docs/policies.rst @@ -317,7 +317,7 @@ decision. If a remote rule file exists it should contain a policy for each required decision context. If no tests are required for the particular decision context, there should be empty rules set, i.e. ``rules: []``. In this case the evaluation result -will be ``no tests are required``. If there is no decision context matching the +will be ``No tests are required``. If there is no decision context matching the original policy, the result will be ``Cannot find any applicable policies``. 
To be able to get remote rule file, Greenwave requires ``REMOTE_RULE_POLICIES`` diff --git a/functional-tests/consumers/test_resultsdb.py b/functional-tests/consumers/test_resultsdb.py index 4ea159d6..ffcc202d 100644 --- a/functional-tests/consumers/test_resultsdb.py +++ b/functional-tests/consumers/test_resultsdb.py @@ -89,7 +89,7 @@ def test_consume_new_result( 'source': None, } ], - 'summary': '2 of 3 required test results missing', + 'summary': 'Of 3 required test(s), 2 result(s) missing', 'subject': [ {'item': nvr, 'type': 'koji_build'}, ], @@ -101,7 +101,7 @@ def test_consume_new_result( 'applicable_policies': ['taskotron_release_critical_tasks_with_blocklist', 'taskotron_release_critical_tasks'], 'policies_satisfied': False, - 'summary': '3 of 3 required test results missing', + 'summary': 'Of 3 required test(s), 3 result(s) missing', 'satisfied_requirements': [], 'unsatisfied_requirements': [ { @@ -152,7 +152,7 @@ def test_consume_new_result( }, ], 'unsatisfied_requirements': [], - 'summary': 'All required tests passed', + 'summary': 'All required tests passed or waived', 'subject': [ {'item': nvr, 'type': 'koji_build'}, ], @@ -162,7 +162,7 @@ def test_consume_new_result( 'previous': { 'applicable_policies': ['taskotron_release_critical_tasks_for_testing'], 'policies_satisfied': False, - 'summary': '1 of 1 required test results missing', + 'summary': 'Of 1 required test(s), 1 result(s) missing', 'satisfied_requirements': [], 'unsatisfied_requirements': [ { @@ -260,7 +260,7 @@ def test_consume_compose_id_result( 'subject': [{'productmd.compose.id': compose_id}], 'subject_type': 'compose', 'subject_identifier': compose_id, - 'summary': '1 of 2 required test results missing', + 'summary': 'Of 2 required test(s), 1 result(s) missing', 'previous': old_decision, 'satisfied_requirements': [{ 'subject_type': 'compose', @@ -380,7 +380,7 @@ def test_consume_legacy_result( 'source': None, } ], - 'summary': '2 of 3 required test results missing', + 'summary': 'Of 3 required test(s), 2 result(s) missing', 'subject': [ { 'item': nvr, @@ -427,7 +427,7 @@ def test_consume_legacy_result( 'type': 'test-result-passed' }], 'unsatisfied_requirements': [], - 'summary': 'All required tests passed', + 'summary': 'All required tests passed or waived', 'subject': [ { 'item': nvr, @@ -628,7 +628,7 @@ def test_consume_new_result_container_image( 'subject': [{'item': item_hash, 'type': 'container-image'}], 'subject_type': 'container-image', 'subject_identifier': item_hash, - 'summary': 'All required tests passed', + 'summary': 'All required tests passed or waived', 'previous': old_decision, 'satisfied_requirements': [{ 'subject_type': 'container-image', diff --git a/functional-tests/consumers/test_waiverdb.py b/functional-tests/consumers/test_waiverdb.py index 21fd3929..ed2588e3 100644 --- a/functional-tests/consumers/test_waiverdb.py +++ b/functional-tests/consumers/test_waiverdb.py @@ -77,7 +77,7 @@ def test_consume_new_waiver( 'applicable_policies': ['taskotron_release_critical_tasks_with_blocklist', 'taskotron_release_critical_tasks'], 'policies_satisfied': False, - 'summary': '1 of 3 required tests failed', + 'summary': 'Of 3 required test(s), 1 test(s) failed', 'satisfied_requirements': [ { 'subject_type': 'koji_build', @@ -160,6 +160,6 @@ def test_consume_new_waiver( } ], 'unsatisfied_requirements': [], - 'summary': 'All required tests passed', + 'summary': 'All required tests passed or waived', 'testcase': testcase, } diff --git a/functional-tests/test_api_v1.py b/functional-tests/test_api_v1.py index 
2829eef7..c7aca0c6 100644 --- a/functional-tests/test_api_v1.py +++ b/functional-tests/test_api_v1.py @@ -238,7 +238,7 @@ def test_make_a_decision_on_passed_result(requests_session, greenwave_server, te 'taskotron_release_critical_tasks_with_blocklist', 'taskotron_release_critical_tasks', ] - expected_summary = 'All required tests passed' + expected_summary = 'All required tests passed or waived' assert res_data['summary'] == expected_summary @@ -370,7 +370,7 @@ def test_make_a_decision_on_failed_result_with_waiver( assert res_data['policies_satisfied'] is True assert 'taskotron_release_critical_tasks' in res_data['applicable_policies'] assert 'taskotron_release_critical_tasks_with_blocklist' in res_data['applicable_policies'] - expected_summary = 'All required tests passed' + expected_summary = 'All required tests passed or waived' assert res_data['summary'] == expected_summary @@ -391,7 +391,7 @@ def test_make_a_decision_on_failed_result(requests_session, greenwave_server, te assert res_data['policies_satisfied'] is False assert 'taskotron_release_critical_tasks' in res_data['applicable_policies'] assert 'taskotron_release_critical_tasks_with_blocklist' in res_data['applicable_policies'] - expected_summary = '1 of 3 required tests failed, 2 results missing' + expected_summary = 'Of 3 required test(s), 1 test(s) failed, 2 result(s) missing' assert res_data['summary'] == expected_summary expected_unsatisfied_requirements = [ { @@ -437,7 +437,7 @@ def test_make_a_decision_on_queued_result(requests_session, greenwave_server, te assert res_data['policies_satisfied'] is False assert 'taskotron_release_critical_tasks' in res_data['applicable_policies'] assert 'taskotron_release_critical_tasks_with_blocklist' in res_data['applicable_policies'] - expected_summary = '3 of 3 required test results missing' + expected_summary = 'Of 3 required test(s), 1 test(s) incomplete, 2 result(s) missing' assert res_data['summary'] == expected_summary expected_unsatisfied_requirements = [ { @@ -483,7 +483,7 @@ def test_make_a_decision_on_running_result(requests_session, greenwave_server, t assert res_data['policies_satisfied'] is False assert 'taskotron_release_critical_tasks' in res_data['applicable_policies'] assert 'taskotron_release_critical_tasks_with_blocklist' in res_data['applicable_policies'] - expected_summary = '3 of 3 required test results missing' + expected_summary = 'Of 3 required test(s), 1 test(s) incomplete, 2 result(s) missing' assert res_data['summary'] == expected_summary expected_unsatisfied_requirements = [ { @@ -526,7 +526,7 @@ def test_make_a_decision_on_no_results(requests_session, greenwave_server, testd assert res_data['policies_satisfied'] is False assert 'taskotron_release_critical_tasks' in res_data['applicable_policies'] assert 'taskotron_release_critical_tasks_with_blocklist' in res_data['applicable_policies'] - expected_summary = '3 of 3 required test results missing' + expected_summary = 'Of 3 required test(s), 3 result(s) missing' assert res_data['summary'] == expected_summary expected_unsatisfied_requirements = [ { @@ -571,7 +571,7 @@ def test_make_a_decision_on_redhat_cont_image(requests_session, greenwave_server assert r.status_code == 200 res_data = r.json() assert res_data['policies_satisfied'] is False - expected_summary = '2 of 2 required tests failed' + expected_summary = 'Of 2 required test(s), 2 test(s) failed' assert res_data['summary'] == expected_summary expected_unsatisfied_requirements = [ { @@ -623,7 +623,7 @@ def test_subject_type_group(requests_session, 
greenwave_server, testdatabuilder) assert res_data['satisfied_requirements'][0]['type'] == 'test-result-passed' assert res_data['policies_satisfied'] is True - expected_summary = 'All required tests passed' + expected_summary = 'All required tests passed or waived' assert res_data['summary'] == expected_summary @@ -640,7 +640,7 @@ def test_empty_policy_is_always_satisfied( res_data = r.json() assert res_data['policies_satisfied'] is True assert res_data['applicable_policies'] == ['empty-policy'] - expected_summary = 'no tests are required' + expected_summary = 'No tests are required' assert res_data['summary'] == expected_summary assert res_data['unsatisfied_requirements'] == [] @@ -664,7 +664,7 @@ def test_bodhi_push_update_stable_policy( assert res_data['policies_satisfied'] is True assert 'taskotron_release_critical_tasks' in res_data['applicable_policies'] assert 'taskotron_release_critical_tasks_with_blocklist' in res_data['applicable_policies'] - expected_summary = 'All required tests passed' + expected_summary = 'All required tests passed or waived' assert res_data['summary'] == expected_summary assert res_data['unsatisfied_requirements'] == [] @@ -687,7 +687,7 @@ def test_bodhi_nonexistent_bodhi_update_policy( res_data = r.json() assert res_data['policies_satisfied'] is True assert res_data['applicable_policies'] == [] - assert res_data['summary'] == 'no tests are required' + assert res_data['summary'] == 'No tests are required' assert res_data['unsatisfied_requirements'] == [] @@ -723,7 +723,7 @@ def test_multiple_results_in_a_subject( assert res_data['policies_satisfied'] is False assert 'taskotron_release_critical_tasks' in res_data['applicable_policies'] assert 'taskotron_release_critical_tasks_with_blocklist' in res_data['applicable_policies'] - assert res_data['summary'] == '1 of 3 required tests failed' + assert res_data['summary'] == 'Of 3 required test(s), 1 test(s) failed' expected_unsatisfied_requirements = [ { 'item': {'item': nvr, 'type': 'koji_build'}, @@ -821,7 +821,7 @@ def test_make_a_decision_on_passed_result_with_scenario( res_data = r.json() assert res_data['policies_satisfied'] is True assert res_data['applicable_policies'] == ['openqa_important_stuff_for_rawhide'] - expected_summary = 'All required tests passed' + expected_summary = 'All required tests passed or waived' assert res_data['summary'] == expected_summary @@ -859,7 +859,7 @@ def test_make_a_decision_on_failing_result_with_scenario( res_data = r.json() assert res_data['policies_satisfied'] is False assert res_data['applicable_policies'] == ['openqa_important_stuff_for_rawhide'] - expected_summary = '1 of 2 required tests failed' + expected_summary = 'Of 2 required test(s), 1 test(s) failed' assert res_data['summary'] == expected_summary expected_unsatisfied_requirements = [{ 'item': {'productmd.compose.id': compose_id}, @@ -1049,7 +1049,7 @@ def test_make_a_decision_about_brew_build(requests_session, greenwave_server, te res_data = r.json() assert res_data['policies_satisfied'] is True assert res_data['applicable_policies'] == ['osci_compose'] - assert res_data['summary'] == 'All required tests passed' + assert res_data['summary'] == 'All required tests passed or waived' def test_validate_gating_yaml_valid(requests_session, greenwave_server): @@ -1295,7 +1295,7 @@ def test_decision_on_redhat_module(requests_session, greenwave_server, testdatab assert r.status_code == 200 res_data = r.json() assert res_data['policies_satisfied'] is True - expected_summary = 'All required tests passed' + 
expected_summary = 'All required tests passed or waived' assert res_data['summary'] == expected_summary assert res_data['results'][0]['data']['type'][0] == 'redhat-module' @@ -1319,7 +1319,7 @@ def test_verbose_retrieve_latest_results(requests_session, greenwave_server, tes assert r.status_code == 200 res_data = r.json() assert res_data['policies_satisfied'] is True - expected_summary = 'All required tests passed' + expected_summary = 'All required tests passed or waived' assert res_data['summary'] == expected_summary assert len(res_data['results']) == 3 for result in res_data['results']: @@ -1361,7 +1361,7 @@ def test_make_decision_passed_on_subject_type_bodhi_with_waiver( assert res_data['applicable_policies'] == [ 'bodhi-test-policy', ] - expected_summary = 'All required tests passed' + expected_summary = 'All required tests passed or waived' assert res_data['summary'] == expected_summary @@ -1420,7 +1420,7 @@ def test_verbose_retrieve_latest_results_scenario(requests_session, greenwave_se assert r.status_code == 200 res_data = r.json() assert res_data['policies_satisfied'] is True - expected_summary = 'All required tests passed' + expected_summary = 'All required tests passed or waived' assert res_data['summary'] == expected_summary assert len(res_data['results']) == 2 for result in res_data['results']: @@ -1699,7 +1699,7 @@ def test_make_a_decision_on_passed_result_with_custom_scenario( assert r.status_code == 200 res_data = r.json() assert res_data['policies_satisfied'] is True - assert res_data['summary'] == 'All required tests passed' + assert res_data['summary'] == 'All required tests passed or waived' assert res_data['satisfied_requirements'] == [ { 'subject_identifier': item1_nvr, @@ -1766,4 +1766,4 @@ def test_make_a_decision_all_scenarios_waived( for result in results ] assert res_data['policies_satisfied'] is True - assert res_data['summary'] == 'All required tests passed' + assert res_data['summary'] == 'All required tests passed or waived' diff --git a/greenwave/policies.py b/greenwave/policies.py index 0962bbde..4307f019 100644 --- a/greenwave/policies.py +++ b/greenwave/policies.py @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0+ +from collections import defaultdict from fnmatch import fnmatch import glob import logging @@ -140,6 +141,7 @@ class RuleNotSatisfied(Answer): """ is_satisfied = False + summary_text = "unexpected unsatisfied requirement(s)" def to_json(self): raise NotImplementedError() @@ -150,6 +152,12 @@ def to_waived(self, waiver_id): """ raise NotImplementedError() + def update_summary(self, summary): + if self.is_test_result: + summary.test_msgs[self.summary_text] += 1 + else: + summary.non_test_msgs[self.summary_text] += 1 + class TestResultMissing(RuleNotSatisfied): """ @@ -157,6 +165,8 @@ class TestResultMissing(RuleNotSatisfied): ResultsDB with a matching item and test case name). """ + summary_text = "result(s) missing" + def __init__(self, subject, test_case_name, scenario, source): self.subject = subject self.test_case_name = test_case_name @@ -186,6 +196,8 @@ class TestResultIncomplete(RuleNotSatisfied): result outcomes in ResultsDB with a matching item and test case name). """ + summary_text = "test(s) incomplete" + def __init__(self, subject, test_case_name, source, result_id, data): self.subject = subject self.test_case_name = test_case_name @@ -245,6 +257,8 @@ class TestResultFailed(RuleNotSatisfied): not passing). 
""" + summary_text = "test(s) failed" + def __init__(self, subject, test_case_name, source, result_id, data): self.subject = subject self.test_case_name = test_case_name @@ -284,6 +298,8 @@ class TestResultErrored(RuleNotSatisfied): was an error). """ + summary_text = "test(s) errored" + def __init__( self, subject, @@ -331,6 +347,8 @@ class InvalidRemoteRuleYaml(RuleNotSatisfied): """ scenario = None + is_test_result = False + summary_text = "non-test-result unsatisfied requirement(s) (gating.yaml issues)" def __init__(self, subject, test_case_name, details, source): self.subject = subject @@ -360,6 +378,8 @@ class MissingRemoteRuleYaml(RuleNotSatisfied): test_case_name = 'missing-gating-yaml' scenario = None + is_test_result = False + summary_text = "non-test-result unsatisfied requirement(s) (gating.yaml issues)" def __init__(self, subject, sources): self.subject = subject @@ -384,9 +404,10 @@ class FailedFetchRemoteRuleYaml(RuleNotSatisfied): Error while fetching remote policy. """ - scenario = None - test_case_name = 'failed-fetch-gating-yaml' + scenario = None + is_test_result = False + summary_text = "non-test-result unsatisfied requirement(s) (gating.yaml issues)" def __init__(self, subject, sources, error): self.subject = subject @@ -458,6 +479,9 @@ class ExcludedInPolicy(RuleSatisfied): """ Package was excluded in policy. """ + + is_test_result = False + def __init__(self, subject_identifier, policy): self.subject_identifier = subject_identifier self.policy = policy @@ -471,35 +495,26 @@ def to_json(self): } -def _summarize_answers_without_errored(answers): - failure_count = sum( - 1 for answer in answers - if isinstance(answer, RuleNotSatisfied) - ) - missing_count = sum( - 1 for answer in answers - if isinstance(answer, (TestResultIncomplete, TestResultMissing)) - ) - - # Missing results are also failures but we will distinguish between those - # two in summary message. - failure_count -= missing_count - - if failure_count and missing_count: - return '{} of {} required tests failed, {} result{} missing'.format( - failure_count, len(answers), missing_count, 's' if missing_count > 1 else '') - - if failure_count > 0: - return '{} of {} required tests failed'.format(failure_count, len(answers)) - - if missing_count > 0: - return '{} of {} required test results missing'.format(missing_count, len(answers)) - - if all(answer.is_satisfied for answer in answers): - return 'All required tests passed' - - logging.error('Unexpected unsatisfied result') - return 'inexplicable result' +class _Summary(object): + """ + Internal class using for generating the result summary. + """ + def __init__(self): + self.test_msgs = defaultdict(int) + self.non_test_msgs = defaultdict(int) + + def to_text(self, test_count): + msgstr = "" + if self.non_test_msgs: + msgstr = ", ".join(f"{num} {msg}" for (msg, num) in self.non_test_msgs.items()) + if self.test_msgs: + addmsg = f"Of {test_count} required test(s), " + addmsg += ", ".join(f"{num} {msg}" for (msg, num) in self.test_msgs.items()) + if msgstr: + msgstr = f"{msgstr}. {addmsg}" + else: + msgstr = addmsg + return msgstr def summarize_answers(answers): @@ -512,23 +527,25 @@ def summarize_answers(answers): Returns: str: Human-readable summary. 
""" - test_answers = [ - answer for answer in answers - if answer.is_test_result - ] - if not test_answers: - return 'no tests are required' - - summary = _summarize_answers_without_errored(test_answers) - - errored_count = len([ - answer for answer in test_answers - if isinstance(answer, TestResultErrored) - ]) - if errored_count: - summary += f' ({errored_count} {"error" if errored_count == 1 else "errors"})' - - return summary + summary = _Summary() + test_count = 0 + for answer in answers: + if isinstance(answer, RuleNotSatisfied): + answer.update_summary(summary) + if answer.is_test_result: + test_count += 1 + + msgstr = summary.to_text(test_count) + if msgstr: + return msgstr + + # if we got here, there should be no unsatisfied results + if test_count: + # this means we had some passed/waived tests + return 'All required tests passed or waived' + + # otherwise, should mean we had no required tests + return 'No tests are required' class Rule(SafeYAMLObject): diff --git a/greenwave/tests/test_api_v1.py b/greenwave/tests/test_api_v1.py index b290bc04..b13b7b89 100644 --- a/greenwave/tests/test_api_v1.py +++ b/greenwave/tests/test_api_v1.py @@ -72,7 +72,7 @@ def test_make_decision_retrieves_waivers_on_missing(mock_results, mock_waivers, mock_waivers.return_value = [] response = make_decision() assert 200 == response.status_code - assert '1 of 1 required test results missing' == response.json['summary'] + assert 'Of 1 required test(s), 1 result(s) missing' == response.json['summary'] mock_waivers.assert_called_once() @@ -81,7 +81,7 @@ def test_make_decision_retrieves_waivers_on_failed(mock_results, mock_waivers, m mock_waivers.return_value = [] response = make_decision() assert 200 == response.status_code - assert '1 of 1 required tests failed' == response.json['summary'] + assert 'Of 1 required test(s), 1 test(s) failed' == response.json['summary'] mock_waivers.assert_called_once() @@ -91,7 +91,7 @@ def test_make_decision_retrieves_waivers_omitted_on_passed( mock_waivers.return_value = [] response = make_decision() assert 200 == response.status_code - assert 'All required tests passed' == response.json['summary'] + assert 'All required tests passed or waived' == response.json['summary'] mock_waivers.assert_not_called() @@ -100,7 +100,7 @@ def test_make_decision_retrieves_waivers_on_errored(mock_results, mock_waivers, mock_waivers.return_value = [] response = make_decision() assert 200 == response.status_code - assert '1 of 1 required tests failed (1 error)' == response.json['summary'] + assert 'Of 1 required test(s), 1 test(s) errored' == response.json['summary'] mock_waivers.assert_called_once() @@ -110,7 +110,7 @@ def test_make_decision_retrieves_waivers_once_on_verbose_and_missing( mock_waivers.return_value = [] response = make_decision(verbose=True) assert 200 == response.status_code - assert '1 of 1 required test results missing' == response.json['summary'] + assert 'Of 1 required test(s), 1 result(s) missing' == response.json['summary'] mock_waivers.assert_called_once() @@ -128,7 +128,7 @@ def test_make_decision_with_no_tests_required(mock_results, mock_waivers, make_d """ response = make_decision(policies=policies) assert 200 == response.status_code - assert 'no tests are required' == response.json['summary'] + assert 'No tests are required' == response.json['summary'] mock_waivers.assert_not_called() @@ -152,7 +152,7 @@ def test_make_decision_with_no_tests_required_and_missing_gating_yaml( f.return_value = None response = make_decision(policies=policies) assert 200 == 
response.status_code - assert 'no tests are required' == response.json['summary'] + assert 'No tests are required' == response.json['summary'] mock_waivers.assert_not_called() @@ -195,7 +195,7 @@ def test_make_decision_with_no_tests_required_and_empty_remote_rules( f.return_value = remote_fragment1 response = make_decision(policies=policies) assert 200 == response.status_code - assert 'no tests are required' == response.json['summary'] + assert 'No tests are required' == response.json['summary'] mock_waivers.assert_not_called() with mock.patch('greenwave.resources.retrieve_yaml_remote_rule') as f: @@ -227,7 +227,8 @@ def test_make_decision_with_missing_required_gating_yaml(mock_results, mock_waiv response = make_decision(policies=policies) assert 200 == response.status_code assert not response.json['policies_satisfied'] - assert '1 of 1 required tests failed' == response.json['summary'] + exp = '1 non-test-result unsatisfied requirement(s) (gating.yaml issues)' + assert exp == response.json['summary'] mock_waivers.assert_called_once() @@ -264,7 +265,7 @@ def test_make_decision_multiple_contexts(mock_results, mock_waivers, make_decisi """ response = make_decision(policies=policies, decision_context=["test_policies", "test_2"]) assert 200 == response.status_code - assert '2 of 2 required tests failed' == response.json['summary'] + assert 'Of 2 required test(s), 2 test(s) failed' == response.json['summary'] assert ['test_policy', 'test_policy_2'] == response.json['applicable_policies'] mock_waivers.assert_called_once() diff --git a/greenwave/tests/test_policies.py b/greenwave/tests/test_policies.py index 94f189a6..29861f2d 100644 --- a/greenwave/tests/test_policies.py +++ b/greenwave/tests/test_policies.py @@ -83,17 +83,17 @@ def test_summarize_answers(): testResultMissing = TestResultMissing(testSubject, 'test', None, None) assert summarize_answers([testResultPassed]) == \ - 'All required tests passed' + 'All required tests passed or waived' assert summarize_answers([testResultFailed, testResultPassed]) == \ - '1 of 2 required tests failed' + 'Of 2 required test(s), 1 test(s) failed' assert summarize_answers([testResultMissing]) == \ - '1 of 1 required test results missing' - assert summarize_answers([testResultMissing, testResultFailed]) == \ - '1 of 2 required tests failed, 1 result missing' - assert summarize_answers([testResultMissing, testResultMissing, testResultFailed]) == \ - '1 of 3 required tests failed, 2 results missing' + 'Of 1 required test(s), 1 result(s) missing' + assert summarize_answers([testResultFailed, testResultMissing]) == \ + 'Of 2 required test(s), 1 test(s) failed, 1 result(s) missing' + assert summarize_answers([testResultFailed, testResultMissing, testResultMissing]) == \ + 'Of 3 required test(s), 1 test(s) failed, 2 result(s) missing' assert summarize_answers([testResultMissing, testResultPassed]) == \ - '1 of 2 required test results missing' + 'Of 2 required test(s), 1 result(s) missing' def test_decision_with_missing_result(tmpdir): diff --git a/greenwave/tests/test_summary.py b/greenwave/tests/test_summary.py index 60e381b2..58ccf82b 100644 --- a/greenwave/tests/test_summary.py +++ b/greenwave/tests/test_summary.py @@ -32,47 +32,47 @@ def test_summary_passed(): answers = [ testResultPassed, ] - assert summarize_answers(answers) == 'All required tests passed' + assert summarize_answers(answers) == 'All required tests passed or waived' def test_summary_empty(): answers = [] - assert summarize_answers(answers) == 'no tests are required' + assert 
summarize_answers(answers) == 'No tests are required' def test_summary_failed(): answers = [ testResultFailed, ] - assert summarize_answers(answers) == '1 of 1 required tests failed' + assert summarize_answers(answers) == 'Of 1 required test(s), 1 test(s) failed' def test_summary_incomplete(): answers = [ testResultIncomplete, ] - assert summarize_answers(answers) == '1 of 1 required test results missing' + assert summarize_answers(answers) == 'Of 1 required test(s), 1 test(s) incomplete' def test_summary_missing(): answers = [ testResultMissing, ] - assert summarize_answers(answers) == '1 of 1 required test results missing' + assert summarize_answers(answers) == 'Of 1 required test(s), 1 result(s) missing' def test_summary_missing_waived(): answers = [ TestResultWaived(testResultMissing, 123), ] - assert summarize_answers(answers) == 'All required tests passed' + assert summarize_answers(answers) == 'All required tests passed or waived' def test_summary_errored(): answers = [ testResultErrored, ] - assert summarize_answers(answers) == '1 of 1 required tests failed (1 error)' + assert summarize_answers(answers) == 'Of 1 required test(s), 1 test(s) errored' def test_summary_one_passed_one_failed(): @@ -80,7 +80,7 @@ def test_summary_one_passed_one_failed(): testResultPassed, testResultFailed, ] - assert summarize_answers(answers) == '1 of 2 required tests failed' + assert summarize_answers(answers) == 'Of 2 required test(s), 1 test(s) failed' def test_summary_one_passed_one_missing(): @@ -88,7 +88,7 @@ def test_summary_one_passed_one_missing(): testResultPassed, testResultMissing, ] - assert summarize_answers(answers) == '1 of 2 required test results missing' + assert summarize_answers(answers) == 'Of 2 required test(s), 1 result(s) missing' def test_summary_one_passed_one_missing_waived(): @@ -96,7 +96,7 @@ def test_summary_one_passed_one_missing_waived(): testResultPassed, TestResultWaived(testResultMissing, 123), ] - assert summarize_answers(answers) == 'All required tests passed' + assert summarize_answers(answers) == 'All required tests passed or waived' def test_summary_one_failed_one_missing(): @@ -104,7 +104,8 @@ def test_summary_one_failed_one_missing(): testResultFailed, testResultMissing, ] - assert summarize_answers(answers) == '1 of 2 required tests failed, 1 result missing' + exp = 'Of 2 required test(s), 1 test(s) failed, 1 result(s) missing' + assert summarize_answers(answers) == exp def test_summary_one_passed_one_failed_one_missing(): @@ -113,7 +114,8 @@ def test_summary_one_passed_one_failed_one_missing(): testResultFailed, testResultMissing, ] - assert summarize_answers(answers) == '1 of 3 required tests failed, 1 result missing' + exp = 'Of 3 required test(s), 1 test(s) failed, 1 result(s) missing' + assert summarize_answers(answers) == exp def test_summary_one_passed_one_failed_one_missing_two_errored(): @@ -124,14 +126,16 @@ def test_summary_one_passed_one_failed_one_missing_two_errored(): testResultMissing, testResultErrored, ] - assert summarize_answers(answers) == '3 of 5 required tests failed, 1 result missing (2 errors)' + exp = 'Of 5 required test(s), 2 test(s) errored, 1 test(s) failed, 1 result(s) missing' + assert summarize_answers(answers) == exp def test_summary_invalid_gating_yaml(): answers = [ testInvalidGatingYaml, ] - assert summarize_answers(answers) == '1 of 1 required tests failed' + exp = '1 non-test-result unsatisfied requirement(s) (gating.yaml issues)' + assert summarize_answers(answers) == exp def 
test_summary_one_passed_one_invalid_gating_yaml_one_missing(): @@ -140,4 +144,6 @@ def test_summary_one_passed_one_invalid_gating_yaml_one_missing(): testResultMissing, testInvalidGatingYaml, ] - assert summarize_answers(answers) == '1 of 3 required tests failed, 1 result missing' + exp = '1 non-test-result unsatisfied requirement(s) (gating.yaml issues). ' + exp += 'Of 2 required test(s), 1 result(s) missing' + assert summarize_answers(answers) == exp
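
A minimal stand-alone sketch of the approach the rewritten summarize_answers takes, for readers who want to try the message format outside Greenwave: one pass over the answers, unsatisfied answers tallied by their summary_text, and test vs. non-test requirements reported separately. The FakeAnswer class and the summarize() helper below are simplified stand-ins for illustration only, not Greenwave's real Answer hierarchy or API.

from collections import defaultdict
from dataclasses import dataclass


@dataclass
class FakeAnswer:
    """Simplified stand-in for Greenwave's Answer subclasses (illustration only)."""
    is_satisfied: bool
    is_test_result: bool
    summary_text: str = ""


def summarize(answers):
    """Sketch of the rewritten summarize_answers(): one pass, counts per message."""
    test_msgs = defaultdict(int)      # unsatisfied test requirements, keyed by message
    non_test_msgs = defaultdict(int)  # unsatisfied non-test requirements (e.g. gating.yaml issues)
    test_count = 0
    for answer in answers:
        if answer.is_test_result:
            test_count += 1
        if not answer.is_satisfied:
            bucket = test_msgs if answer.is_test_result else non_test_msgs
            bucket[answer.summary_text] += 1

    parts = []
    if non_test_msgs:
        parts.append(", ".join(f"{num} {msg}" for msg, num in non_test_msgs.items()))
    if test_msgs:
        parts.append(
            f"Of {test_count} required test(s), "
            + ", ".join(f"{num} {msg}" for msg, num in test_msgs.items())
        )
    if parts:
        return ". ".join(parts)
    if test_count:
        return "All required tests passed or waived"
    return "No tests are required"


passed = FakeAnswer(is_satisfied=True, is_test_result=True)
failed = FakeAnswer(False, True, "test(s) failed")
missing = FakeAnswer(False, True, "result(s) missing")
bad_yaml = FakeAnswer(
    False, False, "non-test-result unsatisfied requirement(s) (gating.yaml issues)")

print(summarize([passed, failed, missing]))
# Of 3 required test(s), 1 test(s) failed, 1 result(s) missing
print(summarize([passed, bad_yaml, missing]))
# 1 non-test-result unsatisfied requirement(s) (gating.yaml issues). Of 2 required test(s), 1 result(s) missing
print(summarize([passed]))
# All required tests passed or waived
print(summarize([]))
# No tests are required

The expected output lines in the comments mirror the expectations in greenwave/tests/test_summary.py above; as in the real code, the per-message counts appear in the order the corresponding answers are first seen.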