diff --git a/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml b/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml index bc9aede1e080..2fde77e6eca0 100644 --- a/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml +++ b/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml @@ -416,8 +416,14 @@ jobs: run: python -m scripts.build --prod_env - name: Run Desktop Acceptance Test ${{ matrix.suite }} run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_acceptance_tests --skip-build --suite=${{ matrix.suite }} --prod_env + - name: Display Desktop Acceptance Test Output + if: ${{ always() }} + run: cat test_output.log - name: Run Mobile Acceptance Test ${{ matrix.suite }} run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_acceptance_tests --skip-build --suite=${{ matrix.suite }} --prod_env --mobile + - name: Display Mobile Acceptance Test Output + if: ${{ always() }} + run: cat test_output.log - name: Uploading webpack bundles as an artifact if: ${{ failure() }} uses: actions/upload-artifact@v3 diff --git a/scripts/run_acceptance_tests.py b/scripts/run_acceptance_tests.py index 234d3826b7cd..38e0022ffe65 100644 --- a/scripts/run_acceptance_tests.py +++ b/scripts/run_acceptance_tests.py @@ -1,4 +1,4 @@ -# Copyright 2023 The Oppia Authors. All Rights Reserved. +# Copyright 2024 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License.
@@ -99,6 +99,14 @@ def compile_test_ts_files() -> None: os.path.join(build_dir_path, 'data')) +def print_test_output(output_lines: List[bytes]) -> None: + """Print all the test output lines to a separate file.""" + with open('test_output.log', 'w', encoding='utf-8') as output_file: + for line in output_lines: + line_text = line.decode('utf-8', errors='replace') + output_file.write(line_text + '\n') + + def run_tests(args: argparse.Namespace) -> Tuple[List[bytes], int]: """Run the scripts to start acceptance tests.""" if common.is_oppia_server_already_running(): @@ -167,6 +175,8 @@ def run_tests(args: argparse.Namespace) -> Tuple[List[bytes], int]: if proc.poll() is not None: break + print_test_output(output_lines) + return_value = output_lines, proc.returncode return return_value @@ -178,6 +188,9 @@ def main(args: Optional[List[str]] = None) -> None: with servers.managed_portserver(): _, return_code = run_tests(parsed_args) + with open('test_output.log', 'r', encoding='utf-8') as output_file: + print(output_file.read()) + sys.exit(return_code) diff --git a/scripts/run_acceptance_tests_test.py b/scripts/run_acceptance_tests_test.py index a4f15c109f8f..cbaaaf88768e 100644 --- a/scripts/run_acceptance_tests_test.py +++ b/scripts/run_acceptance_tests_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 The Oppia Authors. All Rights Reserved. +# Copyright 2024 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
@@ -17,8 +17,10 @@ from __future__ import annotations import contextlib +import os import subprocess import sys +from unittest import mock from core.constants import constants from core.tests import test_utils @@ -355,3 +357,37 @@ def test_start_tests_for_long_lived_process(self) -> None: with self.swap_mock_set_constants_to_default: with self.swap(constants, 'EMULATOR_MODE', True): run_acceptance_tests.main(args=['--suite', 'testSuite']) + + def test_print_test_output(self) -> None: + test_input = [ + b'Spec started: Test Suite 1', + b'Test case 1 passed', + b'Test case 2 failed', + b'Spec started: Test Suite 2', + b'Test case 3 skipped', + b'Test case 4 passed' + ] + expected_output_lines = [ + 'Spec started: Test Suite 1\n', + 'Test case 1 passed\n', + 'Test case 2 failed\n', + 'Spec started: Test Suite 2\n', + 'Test case 3 skipped\n', + 'Test case 4 passed\n' + ] + + with mock.patch('builtins.open', mock.mock_open()) as mock_file: + run_acceptance_tests.print_test_output(test_input) + + mock_file.assert_called_once_with( + 'test_output.log', 'w', encoding='utf-8') + + mock_file_obj = mock_file.return_value + + mock_file_obj.write.assert_has_calls( + [mock.call(line) for line in expected_output_lines], + any_order=False + ) + + if os.path.exists('test_output.log'): + os.remove('test_output.log')