From bf7bbb8161efae93b8923688924ef44994ec660a Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Tue, 16 Apr 2024 16:58:01 +0530
Subject: [PATCH 01/11] surface the result of acceptance tests

---
 scripts/run_acceptance_tests.py      | 16 +++++++++++++++-
 scripts/run_acceptance_tests_test.py | 17 ++++++++++++++++-
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/scripts/run_acceptance_tests.py b/scripts/run_acceptance_tests.py
index a44b94e92e4d..85c7a001b35a 100644
--- a/scripts/run_acceptance_tests.py
+++ b/scripts/run_acceptance_tests.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The Oppia Authors. All Rights Reserved.
+# Copyright 2024 The Oppia Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
 # you may not use this file except in compliance with the License.
@@ -99,6 +99,15 @@ def compile_test_ts_files() -> None:
         os.path.join(build_dir_path, 'images'))
 
 
+def print_test_output(output_lines: List[bytes]) -> None:
+    """Print the test output lines to a separate file."""
+    with open('test_output.log', 'w', encoding='utf-8') as output_file:
+        for line in output_lines:
+            line_text = line.decode('utf-8')
+            if 'passed' in line_text.lower() or 'failed' in line_text.lower():
+                output_file.write(line_text + '\n')
+
+
 def run_tests(args: argparse.Namespace) -> Tuple[List[bytes], int]:
     """Run the scripts to start acceptance tests."""
     if common.is_oppia_server_already_running():
@@ -166,6 +175,8 @@ def run_tests(args: argparse.Namespace) -> Tuple[List[bytes], int]:
         if proc.poll() is not None:
             break
 
+    print_test_output(output_lines)
+
     return_value = output_lines, proc.returncode
     return return_value
 
@@ -177,6 +188,9 @@ def main(args: Optional[List[str]] = None) -> None:
     with servers.managed_portserver():
         _, return_code = run_tests(parsed_args)
 
+    with open('test_output.log', 'r', encoding='utf-8') as output_file:
+        print(output_file.read())
+
     sys.exit(return_code)
 
 
diff --git a/scripts/run_acceptance_tests_test.py b/scripts/run_acceptance_tests_test.py
index bc5a6e859037..4b93c6a9564a 100644
--- a/scripts/run_acceptance_tests_test.py
+++ b/scripts/run_acceptance_tests_test.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The Oppia Authors. All Rights Reserved.
+# Copyright 2024 The Oppia Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
 from __future__ import annotations
 
 import contextlib
+import os
 import subprocess
 import sys
 
@@ -349,3 +350,17 @@ def test_start_tests_for_long_lived_process(self) -> None:
         with self.swap_mock_set_constants_to_default:
             with self.swap(constants, 'EMULATOR_MODE', True):
                 run_acceptance_tests.main(args=['--suite', 'testSuite'])
+
+    def test_print_test_output(self) -> None:
+        test_data = [b'Test case 1 passed',
+                     b'Test case 2 failed', b'Test case 3 skipped']
+
+        run_acceptance_tests.print_test_output(test_data)
+
+        with open('test_output.log', 'r', encoding='utf-8') as output_file:
+            lines = output_file.readlines()
+            self.assertEqual(len(lines), 2)
+            self.assertEqual(lines[0].strip(), 'Test case 1 passed')
+            self.assertEqual(lines[1].strip(), 'Test case 2 failed')
+
+        os.remove('test_output.log')

From 0ba363b932d85ea54f2009e0aacd9ab874baf87e Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Sun, 21 Apr 2024 11:49:05 +0530
Subject: [PATCH 02/11] group output by spec and surface the log in CI

---
 .../e2e_lighthouse_performance_acceptance_tests.yml | 4 ++++
 scripts/run_acceptance_tests.py                     | 8 +++++++-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml b/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml
index 08d1cada72a6..57014bedb638 100644
--- a/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml
+++ b/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml
@@ -388,8 +388,12 @@ jobs:
         run: python -m scripts.build --prod_env
       - name: Run Desktop Acceptance Test ${{ matrix.suite }}
         run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_acceptance_tests --skip-build --suite=${{ matrix.suite }} --prod_env
+      - name: Display Desktop Test Output
+        run: cat test_output.log
       - name: Run Mobile Acceptance Test ${{ matrix.suite }}
         run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_acceptance_tests --skip-build --suite=${{ matrix.suite }} --prod_env --mobile
+      - name: Display Mobile Test Output
+        run: cat test_output.log
       - name: Uploading webpack bundles as an artifact
         if: ${{ failure() }}
         uses: actions/upload-artifact@v3
diff --git a/scripts/run_acceptance_tests.py b/scripts/run_acceptance_tests.py
index 85c7a001b35a..ee7abd1cbbe6 100644
--- a/scripts/run_acceptance_tests.py
+++ b/scripts/run_acceptance_tests.py
@@ -102,9 +102,15 @@ def compile_test_ts_files() -> None:
 def print_test_output(output_lines: List[bytes]) -> None:
     """Print the test output lines to a separate file."""
     with open('test_output.log', 'w', encoding='utf-8') as output_file:
+        last_spec_started_line = None
         for line in output_lines:
             line_text = line.decode('utf-8')
-            if 'passed' in line_text.lower() or 'failed' in line_text.lower():
+            if line_text.startswith('Spec started'):
+                last_spec_started_line = line_text
+            elif 'passed' in line_text.lower() or 'failed' in line_text.lower():
+                if last_spec_started_line:
+                    output_file.write(last_spec_started_line + '\n')
+                    last_spec_started_line = None
                 output_file.write(line_text + '\n')
 
 

From f5455aa5abc9a74acab807afca8bfdafc5bc6820 Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Sun, 21 Apr 2024 11:57:57 +0530
Subject: [PATCH 03/11] cover spec grouping in the print_test_output test

---
 scripts/run_acceptance_tests_test.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/scripts/run_acceptance_tests_test.py b/scripts/run_acceptance_tests_test.py
index 4b93c6a9564a..744b2ec41161 100644
--- a/scripts/run_acceptance_tests_test.py
+++ b/scripts/run_acceptance_tests_test.py
@@ -352,15 +352,16 @@ def test_start_tests_for_long_lived_process(self) -> None:
                 run_acceptance_tests.main(args=['--suite', 'testSuite'])
 
     def test_print_test_output(self) -> None:
-        test_data = [b'Test case 1 passed',
-                     b'Test case 2 failed', b'Test case 3 skipped']
+        test_data = [b'Spec started: Test case 1', b'Test case 1 passed',
+                     b'Spec started: Test case 2', b'Test case 2 failed',
+                     b'Spec started: Test case 3', b'Test case 3 passed']
 
         run_acceptance_tests.print_test_output(test_data)
 
         with open('test_output.log', 'r', encoding='utf-8') as output_file:
             lines = output_file.readlines()
-            self.assertEqual(len(lines), 2)
-            self.assertEqual(lines[0].strip(), 'Test case 1 passed')
-            self.assertEqual(lines[1].strip(), 'Test case 2 failed')
+            for i in range(0, len(lines), 2):
+                self.assertTrue(lines[i].startswith('Spec started'))
+                self.assertTrue('passed' in lines[i+1] or 'failed' in lines[i+1])
 
-        os.remove('test_output.log')
+        os.remove('test_output.log')
\ No newline at end of file

From cf107ca55ac12930e74a974e249812f5cf4b1c39 Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Sun, 21 Apr 2024 13:51:51 +0530
Subject: [PATCH 04/11] reformat the print_test_output test data

---
 scripts/run_acceptance_tests_test.py | 31 ++++++++++++++++------------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/scripts/run_acceptance_tests_test.py b/scripts/run_acceptance_tests_test.py
index 744b2ec41161..2ff83c8ad93f 100644
--- a/scripts/run_acceptance_tests_test.py
+++ b/scripts/run_acceptance_tests_test.py
@@ -352,16 +352,21 @@ def test_start_tests_for_long_lived_process(self) -> None:
         run_acceptance_tests.main(args=['--suite', 'testSuite'])
 
     def test_print_test_output(self) -> None:
-        test_data = [b'Spec started: Test case 1', b'Test case 1 passed',
-                     b'Spec started: Test case 2', b'Test case 2 failed',
-                     b'Spec started: Test case 3', b'Test case 3 passed']
-
-        run_acceptance_tests.print_test_output(test_data)
-
-        with open('test_output.log', 'r', encoding='utf-8') as output_file:
-            lines = output_file.readlines()
-            for i in range(0, len(lines), 2):
-                self.assertTrue(lines[i].startswith('Spec started'))
-                self.assertTrue('passed' in lines[i+1] or 'failed' in lines[i+1])
-
-        os.remove('test_output.log')
\ No newline at end of file
+        test_data = [
+            b'Spec started: Test case 1',
+            b'Test case 1 passed',
+            b'Spec started: Test case 2',
+            b'Test case 2 failed',
+            b'Spec started: Test case 3',
+            b'Test case 3 passed'
+        ]
+
+        run_acceptance_tests.print_test_output(test_data)
+
+        with open('test_output.log', 'r', encoding='utf-8') as output_file:
+            lines = output_file.readlines()
+            for i in range(0, len(lines), 2):
+                self.assertTrue(lines[i].startswith('Spec started'))
+                self.assertTrue('passed' in lines[i+1] or 'failed' in lines[i+1])
+
+        os.remove('test_output.log')

From 569c3385a83e797539f16d8dae38ebda3f396aa4 Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Sun, 21 Apr 2024 13:57:13 +0530
Subject: [PATCH 05/11] fix spacing around index arithmetic in the test

---
 scripts/run_acceptance_tests_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/run_acceptance_tests_test.py b/scripts/run_acceptance_tests_test.py
index 2ff83c8ad93f..bef21b346a21 100644
--- a/scripts/run_acceptance_tests_test.py
+++ b/scripts/run_acceptance_tests_test.py
@@ -367,6 +367,6 @@ def test_print_test_output(self) -> None:
             lines = output_file.readlines()
             for i in range(0, len(lines), 2):
                 self.assertTrue(lines[i].startswith('Spec started'))
-                self.assertTrue('passed' in lines[i+1] or 'failed' in lines[i+1])
+                self.assertTrue('passed' in lines[i + 1] or 'failed' in lines[i + 1])
 
         os.remove('test_output.log')

From 7e03d23eab09495762c3ad18998d99362037c6ae Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Sun, 21 Apr 2024 18:39:47 +0530
Subject: [PATCH 06/11] expand print_test_output test coverage

---
 scripts/run_acceptance_tests_test.py | 74 +++++++++++++++++++++++-----
 1 file changed, 63 insertions(+), 11 deletions(-)

diff --git a/scripts/run_acceptance_tests_test.py b/scripts/run_acceptance_tests_test.py
index bef21b346a21..770c6cbcd28b 100644
--- a/scripts/run_acceptance_tests_test.py
+++ b/scripts/run_acceptance_tests_test.py
@@ -20,6 +20,7 @@
 import os
 import subprocess
 import sys
+from unittest import mock
 
 from core.constants import constants
 from core.tests import test_utils
@@ -353,20 +354,71 @@ def test_start_tests_for_long_lived_process(self) -> None:
 
     def test_print_test_output(self) -> None:
         test_data = [
-            b'Spec started: Test case 1',
+            b'Spec started: Test Suite 1',
             b'Test case 1 passed',
-            b'Spec started: Test case 2',
             b'Test case 2 failed',
-            b'Spec started: Test case 3',
-            b'Test case 3 passed'
+            b'Spec started: Test Suite 2',
+            b'Test case 3 passed',
+            b'Test case 4 skipped',
+            b'Test case 5 failed',
+        ]
+        expected_output = [
+            'Spec started: Test Suite 1',
+            'Test case 1 passed',
+            'Test case 2 failed',
+            'Spec started: Test Suite 2',
+            'Test case 3 passed',
+            'Test case 5 failed',
+        ]
+
+        run_acceptance_tests.print_test_output(test_data)
+
+        with open(
+            self.test_output_file_path, 'r', encoding='utf-8'
+        ) as output_file:
+            lines = output_file.readlines()
+
+        self.assertEqual(len(lines), len(expected_output))
+        for line, expected_line in zip(lines, expected_output):
+            self.assertEqual(line.strip(), expected_line)
+
+    @mock.patch(
+        'builtins.open', new_callable=mock.mock_open
+    )
+    def test_print_test_output_with_io_error(
+        self, mock_open: mock.MagicMock
+    ) -> None:
+        mock_open.side_effect = IOError('Failed to open file')
+        test_data = [
+            b'Spec started: Test Suite',
+            b'Test case 1 passed'
+        ]
+
+        with self.assertRaises(IOError):
+            run_acceptance_tests.print_test_output(test_data)
+
+        mock_open.assert_called_once_with(
+            self.test_output_file_path, 'w', encoding='utf-8'
+        )
+
+    def test_print_test_output_results(self) -> None:
+        test_data = [
+            b'Spec started: Test case 1',
+            b'Test case 1 passed',
+            b'Spec started: Test case 2',
+            b'Test case 2 failed',
+            b'Spec started: Test case 3',
+            b'Test case 3 skipped'
         ]
 
-        run_acceptance_tests.print_test_output(test_data)
+        run_acceptance_tests.test_print_test_output_results(test_data)
 
-        with open('test_output.log', 'r', encoding='utf-8') as output_file:
-            lines = output_file.readlines()
-            for i in range(0, len(lines), 2):
-                self.assertTrue(lines[i].startswith('Spec started'))
-                self.assertTrue('passed' in lines[i + 1] or 'failed' in lines[i + 1])
+        with open('test_output.log', 'r', encoding='utf-8') as output_file:
+            lines = output_file.readlines()
+            self.assertEqual(len(lines), 4)
+            self.assertEqual(lines[0].strip(), 'Spec started: Test case 1')
+            self.assertEqual(lines[1].strip(), 'Test case 1 passed')
+            self.assertEqual(lines[2].strip(), 'Spec started: Test case 2')
+            self.assertEqual(lines[3].strip(), 'Test case 2 failed')
 
-        os.remove('test_output.log')
+        os.remove('test_output.log')

From 8501baf668531ae4c8adadbaa86fa42884ff9cac Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Mon, 22 Apr 2024 15:30:36 +0530
Subject: [PATCH 07/11] simplify the print_test_output error and results tests

---
 scripts/run_acceptance_tests_test.py | 53 ++++++++++++----------------
 1 file changed, 23 insertions(+), 30 deletions(-)

diff --git a/scripts/run_acceptance_tests_test.py b/scripts/run_acceptance_tests_test.py
index 770c6cbcd28b..b9d38d54df7b 100644
--- a/scripts/run_acceptance_tests_test.py
+++ b/scripts/run_acceptance_tests_test.py
@@ -21,6 +21,7 @@
 import subprocess
 import sys
 from unittest import mock
+from unittest.mock import Mock
 
 from core.constants import constants
 from core.tests import test_utils
@@ -88,8 +89,11 @@ def mock_managed_process(
 class RunAcceptanceTestsTests(test_utils.GenericTestBase):
     """Test the run_acceptance_tests methods."""
 
+    test_output_file_path = 'test_output.log'
+
     def setUp(self) -> None:
         super().setUp()
+        self.output_lines = ['Test output line 1', 'Test output line 2']
         self.exit_stack = contextlib.ExitStack()
 
         def mock_constants() -> None:
@@ -385,40 +389,29 @@ def test_print_test_output(self) -> None:
     @mock.patch(
         'builtins.open', new_callable=mock.mock_open
     )
-    def test_print_test_output_with_io_error(
-        self, mock_open: mock.MagicMock
-    ) -> None:
+    def test_print_test_output_with_io_error(self, mock_open: Mock) -> None:
         mock_open.side_effect = IOError('Failed to open file')
-        test_data = [
-            b'Spec started: Test Suite',
-            b'Test case 1 passed'
-        ]
-
-        with self.assertRaises(IOError):
-            run_acceptance_tests.print_test_output(test_data)
-
+        self.assertRaises(
+            IOError,
+            run_acceptance_tests.print_test_output,
+            self.output_lines
+        )
        mock_open.assert_called_once_with(
-            self.test_output_file_path, 'w', encoding='utf-8'
+            'test_output.log', 'w', encoding='utf-8'
         )
 
     def test_print_test_output_results(self) -> None:
-        test_data = [
-            b'Spec started: Test case 1',
-            b'Test case 1 passed',
-            b'Spec started: Test case 2',
-            b'Test case 2 failed',
-            b'Spec started: Test case 3',
-            b'Test case 3 skipped'
-        ]
-
-        run_acceptance_tests.test_print_test_output_results(test_data)
+        test_instance = RunAcceptanceTestsTests()
+        try:
+            test_instance.test_print_test_output_results()
 
-        with open('test_output.log', 'r', encoding='utf-8') as output_file:
-            lines = output_file.readlines()
-            self.assertEqual(len(lines), 4)
-            self.assertEqual(lines[0].strip(), 'Spec started: Test case 1')
-            self.assertEqual(lines[1].strip(), 'Test case 1 passed')
-            self.assertEqual(lines[2].strip(), 'Spec started: Test case 2')
-            self.assertEqual(lines[3].strip(), 'Test case 2 failed')
+            with open('test_output.log', 'r', encoding='utf-8') as output_file:
+                lines = output_file.readlines()
+                self.assertEqual(len(lines), 4)
+                self.assertEqual(lines[0].strip(), 'Spec started: Test case 1')
+                self.assertEqual(lines[1].strip(), 'Test case 1 passed')
+                self.assertEqual(lines[2].strip(), 'Spec started: Test case 2')
+                self.assertEqual(lines[3].strip(), 'Test case 2 failed')
 
-        os.remove('test_output.log')
+        finally:
+            os.remove('test_output.log')

From 3a8e5960f7149bfa1239533b57b6396cc9674ea7 Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Sat, 27 Apr 2024 23:07:17 +0530
Subject: [PATCH 08/11] mock file writes in the print_test_output test

---
 scripts/run_acceptance_tests_test.py | 68 +++++++---------------------
 1 file changed, 17 insertions(+), 51 deletions(-)

diff --git a/scripts/run_acceptance_tests_test.py b/scripts/run_acceptance_tests_test.py
index b9d38d54df7b..e4c5f74b54e4 100644
--- a/scripts/run_acceptance_tests_test.py
+++ b/scripts/run_acceptance_tests_test.py
@@ -21,7 +21,6 @@
 import subprocess
 import sys
 from unittest import mock
-from unittest.mock import Mock
 
 from core.constants import constants
 from core.tests import test_utils
@@ -88,11 +88,8 @@ def mock_managed_process(
 class RunAcceptanceTestsTests(test_utils.GenericTestBase):
     """Test the run_acceptance_tests methods."""
 
-    test_output_file_path = 'test_output.log'
-
     def setUp(self) -> None:
         super().setUp()
-        self.output_lines = ['Test output line 1', 'Test output line 2']
         self.exit_stack = contextlib.ExitStack()
 
         def mock_constants() -> None:
@@ -362,56 +358,26 @@ def test_print_test_output(self) -> None:
             b'Test case 1 passed',
             b'Test case 2 failed',
             b'Spec started: Test Suite 2',
-            b'Test case 3 passed',
-            b'Test case 4 skipped',
-            b'Test case 5 failed',
-        ]
-        expected_output = [
-            'Spec started: Test Suite 1',
-            'Test case 1 passed',
-            'Test case 2 failed',
-            'Spec started: Test Suite 2',
-            'Test case 3 passed',
-            'Test case 5 failed',
+            b'Test case 3 skipped',
+            b'Test case 4 passed'
         ]
-
-        run_acceptance_tests.print_test_output(test_data)
-
-        with open(
-            self.test_output_file_path, 'r', encoding='utf-8'
-        ) as output_file:
-            lines = output_file.readlines()
-
-        self.assertEqual(len(lines), len(expected_output))
-        for line, expected_line in zip(lines, expected_output):
-            self.assertEqual(line.strip(), expected_line)
-
-    @mock.patch(
-        'builtins.open', new_callable=mock.mock_open
-    )
-    def test_print_test_output_with_io_error(self, mock_open: Mock) -> None:
-        mock_open.side_effect = IOError('Failed to open file')
-        self.assertRaises(
-            IOError,
-            run_acceptance_tests.print_test_output,
-            self.output_lines
-        )
-        mock_open.assert_called_once_with(
-            'test_output.log', 'w', encoding='utf-8'
+        expected_output = (
+            'Spec started: Test Suite 1\n'
+            'Test case 1 passed\n'
+            'Test case 2 failed\n'
+            'Spec started: Test Suite 2\n'
+            'Test case 4 passed\n'
         )
 
-    def test_print_test_output_results(self) -> None:
-        test_instance = RunAcceptanceTestsTests()
-        try:
-            test_instance.test_print_test_output_results()
+        with mock.patch('builtins.open', mock.mock_open()) as mock_file:
+            run_acceptance_tests.print_test_output(test_data)
 
-            with open('test_output.log', 'r', encoding='utf-8') as output_file:
-                lines = output_file.readlines()
-                self.assertEqual(len(lines), 4)
-                self.assertEqual(lines[0].strip(), 'Spec started: Test case 1')
-                self.assertEqual(lines[1].strip(), 'Test case 1 passed')
-                self.assertEqual(lines[2].strip(), 'Spec started: Test case 2')
-                self.assertEqual(lines[3].strip(), 'Test case 2 failed')
+            mock_file.assert_called_once_with(
+                'test_output.log', 'w', encoding='utf-8')
 
-        finally:
+            mock_file_obj = mock_file.return_value
+
+            mock_file_obj.write.assert_called_once_with(expected_output)
+
+        if os.path.exists('test_output.log'):
             os.remove('test_output.log')

From caec6d154449aebdcbaa098e50b2d0c7c4f814a2 Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Mon, 29 Apr 2024 23:00:44 +0530
Subject: [PATCH 09/11] assert per-line writes in the print_test_output test

---
 scripts/run_acceptance_tests_test.py | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/scripts/run_acceptance_tests_test.py b/scripts/run_acceptance_tests_test.py
index e4c5f74b54e4..036eed0bfc26 100644
--- a/scripts/run_acceptance_tests_test.py
+++ b/scripts/run_acceptance_tests_test.py
@@ -361,13 +361,13 @@ def test_print_test_output(self) -> None:
             b'Test case 3 skipped',
             b'Test case 4 passed'
         ]
-        expected_output = (
-            'Spec started: Test Suite 1\n'
-            'Test case 1 passed\n'
-            'Test case 2 failed\n'
-            'Spec started: Test Suite 2\n'
+        expected_output_lines = [
+            'Spec started: Test Suite 1\n',
+            'Test case 1 passed\n',
+            'Test case 2 failed\n',
+            'Spec started: Test Suite 2\n',
             'Test case 4 passed\n'
-        )
+        ]
 
         with mock.patch('builtins.open', mock.mock_open()) as mock_file:
             run_acceptance_tests.print_test_output(test_data)
@@ -377,7 +377,10 @@ def test_print_test_output(self) -> None:
 
             mock_file_obj = mock_file.return_value
 
-            mock_file_obj.write.assert_called_once_with(expected_output)
+            mock_file_obj.write.assert_has_calls(
+                [mock.call(line) for line in expected_output_lines],
+                any_order=False
+            )
 
         if os.path.exists('test_output.log'):
             os.remove('test_output.log')

From 138e167f2bbfa6b4c5cc03eaefe74fde31cf4573 Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Tue, 30 Apr 2024 19:31:44 +0530
Subject: [PATCH 10/11] rename test_data to test_input

---
 scripts/run_acceptance_tests_test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/run_acceptance_tests_test.py b/scripts/run_acceptance_tests_test.py
index 036eed0bfc26..d69a24f55166 100644
--- a/scripts/run_acceptance_tests_test.py
+++ b/scripts/run_acceptance_tests_test.py
@@ -353,7 +353,7 @@ def test_start_tests_for_long_lived_process(self) -> None:
         run_acceptance_tests.main(args=['--suite', 'testSuite'])
 
     def test_print_test_output(self) -> None:
-        test_data = [
+        test_input = [
             b'Spec started: Test Suite 1',
             b'Test case 1 passed',
             b'Test case 2 failed',
@@ -370,7 +370,7 @@ def test_print_test_output(self) -> None:
         ]
 
         with mock.patch('builtins.open', mock.mock_open()) as mock_file:
-            run_acceptance_tests.print_test_output(test_data)
+            run_acceptance_tests.print_test_output(test_input)
 
             mock_file.assert_called_once_with(
                 'test_output.log', 'w', encoding='utf-8')

From 1692c11df8d220938def4fa28c8939524d13e4d7 Mon Sep 17 00:00:00 2001
From: rahat2134
Date: Tue, 4 Jun 2024 00:00:44 +0530
Subject: [PATCH 11/11] write the full test output to the log file

---
 .../e2e_lighthouse_performance_acceptance_tests.yml |  4 ++--
 scripts/run_acceptance_tests.py                     | 11 ++---------
 2 files changed, 4 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml b/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml
index 26fd8696838d..5d4e40c4bc85 100644
--- a/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml
+++ b/.github/workflows/e2e_lighthouse_performance_acceptance_tests.yml
@@ -392,11 +392,11 @@ jobs:
         run: python -m scripts.build --prod_env
       - name: Run Desktop Acceptance Test ${{ matrix.suite }}
         run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_acceptance_tests --skip-build --suite=${{ matrix.suite }} --prod_env
-      - name: Display Desktop Test Output
+      - name: Display Desktop Acceptance Test Output
         run: cat test_output.log
       - name: Run Mobile Acceptance Test ${{ matrix.suite }}
         run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_acceptance_tests --skip-build --suite=${{ matrix.suite }} --prod_env --mobile
-      - name: Display Mobile Test Output
+      - name: Display Mobile Acceptance Test Output
         run: cat test_output.log
       - name: Uploading webpack bundles as an artifact
         if: ${{ failure() }}
diff --git a/scripts/run_acceptance_tests.py b/scripts/run_acceptance_tests.py
index a4f6abe4142a..6bbdd1878c4c 100644
--- a/scripts/run_acceptance_tests.py
+++ b/scripts/run_acceptance_tests.py
@@ -100,18 +100,11 @@ def compile_test_ts_files() -> None:
 
 
 def print_test_output(output_lines: List[bytes]) -> None:
-    """Print the test output lines to a separate file."""
+    """Print all the test output lines to a separate file."""
     with open('test_output.log', 'w', encoding='utf-8') as output_file:
-        last_spec_started_line = None
         for line in output_lines:
             line_text = line.decode('utf-8')
-            if line_text.startswith('Spec started'):
-                last_spec_started_line = line_text
-            elif 'passed' in line_text.lower() or 'failed' in line_text.lower():
-                if last_spec_started_line:
-                    output_file.write(last_spec_started_line + '\n')
-                    last_spec_started_line = None
-                output_file.write(line_text + '\n')
+            output_file.write(line_text + '\n')
 
 
 def run_tests(args: argparse.Namespace) -> Tuple[List[bytes], int]:
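
The series converges on a simple design: the earlier spec-grouping filter is dropped, print_test_output mirrors every decoded runner line into test_output.log, and the workflow surfaces that file with cat test_output.log in a dedicated step after each suite. A minimal, self-contained sketch of that end state for reference; the sample_output lines are illustrative stand-ins, not real output from the Oppia acceptance-test runner:

from typing import List

def print_test_output(output_lines: List[bytes]) -> None:
    """Write every decoded test output line to test_output.log."""
    with open('test_output.log', 'w', encoding='utf-8') as output_file:
        for line in output_lines:
            output_file.write(line.decode('utf-8') + '\n')

if __name__ == '__main__':
    # Illustrative runner output; the real lines arrive as bytes from the
    # acceptance-test subprocess collected in run_tests().
    sample_output = [b'Spec started: Test Suite 1', b'Test case 1 passed']
    print_test_output(sample_output)
    with open('test_output.log', 'r', encoding='utf-8') as output_file:
        print(output_file.read())  # what `cat test_output.log` shows in CI

Writing the log unconditionally and printing it in a separate CI step also keeps the function easy to unit-test with mock.mock_open, as patches 08-10 do: only the open() call and the per-line write() calls need to be asserted.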