-
-
Notifications
You must be signed in to change notification settings - Fork 3.8k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Fix #20139: Surface Acceptance Test results in a more convenient location #20171
base: develop
Are you sure you want to change the base?
Changes from 28 commits
bf7bbb8
e602e83
4af3798
901b018
0ba363b
987a4ea
f5455aa
92c1c1a
cf107ca
569c338
7e03d23
c1d157f
8501baf
e7fb522
392b5bc
04653c6
3a8e596
c566e51
e239bcc
80108cb
caec6d1
14540d7
18a9a48
b7a5149
138e167
50a1339
fd0e56b
36071fa
66bbd22
2683b98
35ba89b
ee07b5c
479daf1
1692c11
b72be13
2068cec
ee736d9
fc3bc12
4c225d5
7872f67
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -392,8 +392,12 @@ jobs: | |
run: python -m scripts.build --prod_env | ||
- name: Run Desktop Acceptance Test ${{ matrix.suite }} | ||
run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_acceptance_tests --skip-build --suite=${{ matrix.suite }} --prod_env | ||
- name: Display Desktop Test Output | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Display Desktop Acceptance Test Output |
||
run: cat test_output.log | ||
- name: Run Mobile Acceptance Test ${{ matrix.suite }} | ||
run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_acceptance_tests --skip-build --suite=${{ matrix.suite }} --prod_env --mobile | ||
- name: Display Mobile Test Output | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Display Mobile Acceptance Test Output |
||
run: cat test_output.log | ||
- name: Uploading webpack bundles as an artifact | ||
if: ${{ failure() }} | ||
uses: actions/upload-artifact@v3 | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,4 +1,4 @@ | ||
# Copyright 2023 The Oppia Authors. All Rights Reserved. | ||
# Copyright 2024 The Oppia Authors. All Rights Reserved. | ||
# | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Thanks for this! :) |
||
# Licensed under the Apache License, Version 2.0 (the 'License'); | ||
# you may not use this file except in compliance with the License. | ||
|
@@ -99,6 +99,21 @@ def compile_test_ts_files() -> None: | |
os.path.join(build_dir_path, 'images')) | ||
|
||
|
||
def print_test_output(output_lines: List[bytes]) -> None: | ||
"""Print the test output lines to a separate file.""" | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I do think this is all very complex. You can just print all the lines. In the event of a failure, I don't think this will print the call stack as well, which we would want to see, as it tells you where and why the test failed. |
||
with open('test_output.log', 'w', encoding='utf-8') as output_file: | ||
last_spec_started_line = None | ||
for line in output_lines: | ||
line_text = line.decode('utf-8') | ||
if line_text.startswith('Spec started'): | ||
last_spec_started_line = line_text | ||
elif 'passed' in line_text.lower() or 'failed' in line_text.lower(): | ||
if last_spec_started_line: | ||
output_file.write(last_spec_started_line + '\n') | ||
last_spec_started_line = None | ||
output_file.write(line_text + '\n') | ||
|
||
|
||
def run_tests(args: argparse.Namespace) -> Tuple[List[bytes], int]: | ||
"""Run the scripts to start acceptance tests.""" | ||
if common.is_oppia_server_already_running(): | ||
|
@@ -166,6 +181,8 @@ def run_tests(args: argparse.Namespace) -> Tuple[List[bytes], int]: | |
if proc.poll() is not None: | ||
break | ||
|
||
print_test_output(output_lines) | ||
|
||
return_value = output_lines, proc.returncode | ||
return return_value | ||
|
||
|
@@ -177,6 +194,9 @@ def main(args: Optional[List[str]] = None) -> None: | |
with servers.managed_portserver(): | ||
_, return_code = run_tests(parsed_args) | ||
|
||
with open('test_output.log', 'r', encoding='utf-8') as output_file: | ||
print(output_file.read()) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The test output is being printed a second time here, and that's not necessary. |
||
|
||
sys.exit(return_code) | ||
|
||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,4 +1,4 @@ | ||
# Copyright 2023 The Oppia Authors. All Rights Reserved. | ||
# Copyright 2024 The Oppia Authors. All Rights Reserved. | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
|
@@ -17,8 +17,10 @@ | |
from __future__ import annotations | ||
|
||
import contextlib | ||
import os | ||
import subprocess | ||
import sys | ||
from unittest import mock | ||
|
||
from core.constants import constants | ||
from core.tests import test_utils | ||
|
@@ -349,3 +351,36 @@ def test_start_tests_for_long_lived_process(self) -> None: | |
with self.swap_mock_set_constants_to_default: | ||
with self.swap(constants, 'EMULATOR_MODE', True): | ||
run_acceptance_tests.main(args=['--suite', 'testSuite']) | ||
|
||
def test_print_test_output(self) -> None: | ||
test_input = [ | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Please include the result of printing the test output in the test name. |
||
b'Spec started: Test Suite 1', | ||
b'Test case 1 passed', | ||
b'Test case 2 failed', | ||
b'Spec started: Test Suite 2', | ||
b'Test case 3 skipped', | ||
b'Test case 4 passed' | ||
] | ||
expected_output_lines = [ | ||
'Spec started: Test Suite 1\n', | ||
'Test case 1 passed\n', | ||
'Test case 2 failed\n', | ||
'Spec started: Test Suite 2\n', | ||
'Test case 4 passed\n' | ||
] | ||
|
||
with mock.patch('builtins.open', mock.mock_open()) as mock_file: | ||
run_acceptance_tests.print_test_output(test_input) | ||
|
||
mock_file.assert_called_once_with( | ||
'test_output.log', 'w', encoding='utf-8') | ||
|
||
mock_file_obj = mock_file.return_value | ||
|
||
mock_file_obj.write.assert_has_calls( | ||
[mock.call(line) for line in expected_output_lines], | ||
any_order=False | ||
) | ||
|
||
if os.path.exists('test_output.log'): | ||
os.remove('test_output.log') |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Can you add a flag for this new behavior and set it here?