From f70d9349a5a8e80b832cc6ff7d753e13a910425b Mon Sep 17 00:00:00 2001 From: Luciano Renzi Date: Sat, 6 Jul 2019 23:52:40 -0300 Subject: [PATCH] implement skip flag for tests, closes #146 --- CHANGELOG.md | 2 + docs/source/tests.md | 32 +- golem/core/test.py | 13 +- golem/gui/api.py | 3 +- golem/gui/static/js/main.js | 4 - golem/gui/static/js/test_case.js | 18 + .../gui/templates/test_builder/test_case.html | 16 +- golem/test_runner/conf.py | 1 + golem/test_runner/execution_runner.py | 6 +- golem/test_runner/multiprocess_executor.py | 5 +- golem/test_runner/test_runner.py | 31 +- tests/conftest.py | 4 +- tests/core/test_test.py | 88 +- tests/test_runner/test_runner_test.py | 771 ++++++++---------- 14 files changed, 508 insertions(+), 486 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f50647a..38c955bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ - Remove executions from report dashboard +- Skip flag to test [#146](https://github.com/golemhq/golem/issues/146) + ### Fixed - [#169](https://github.com/golemhq/golem/issues/169) diff --git a/docs/source/tests.md b/docs/source/tests.md index 9311eae8..2c4a342d 100644 --- a/docs/source/tests.md +++ b/docs/source/tests.md @@ -21,21 +21,34 @@ tags = [] pages = [] +skip = False + + def setup(data): pass + def test(data): pass + def teardown(data): pass - ``` -A test must implement at least a 'test' function that receives a data object as argument. +A test must implement at least a **test** function that receives a data object as argument. + +## Test Data -## Infile Test Data +Test data can be defined inside the file or in a separate CSV file. +For detailed info about see: [Test Data](test-data.html) + +### CSV Data + +It should be defined in a CSV file with the same name and in the same folder as the test. + +### Infile Test Data A test can have data defined as a list of dictionaries. @@ -59,12 +72,23 @@ def test(data): Note: when saving a test using the Test Module, if the *test_data* setting is not 'infile', any data stored in the test will be moved to a CSV file. +## Skip flag + +A flag variable to indicate that this test should be skipped. +It should be a boolean or a string to use as skip message. +Note: tests will only be skipped when running from a suite. + +## Tags + +A list of tags (strings). +Tags can be used to filter tests when running a suite. +See [Filter Tests by Tags](running-tests.html#filter-tests-by-tags). ## Implicit vs Explicit Imports By default the test runner imports the golem.actions module and any page module implicitly during the execution. Pages are saved as a list of strings. 
-The GUI test builder complies with this format and generates code as the following: +The GUI test builder complies with this format and generates code like the following: ```python pages = ['page1'] diff --git a/golem/core/test.py b/golem/core/test.py index 3fdf5f94..553f7a5b 100644 --- a/golem/core/test.py +++ b/golem/core/test.py @@ -84,7 +84,7 @@ def duplicate_test(project, name, new_name): return errors -def edit_test(project, test_name, description, pages, steps, test_data, tags): +def edit_test(project, test_name, description, pages, steps, test_data, tags, skip=False): """Save test contents to file""" def _format_description(description): @@ -167,6 +167,10 @@ def _format_steps(steps): test_data_module.remove_csv_if_exists(project, test_name) else: test_data_module.save_external_test_data_file(project, test_name, test_data) + if skip: + if type(skip) is str: + skip = "'{}'".format(skip) + f.write('skip = {}\n\n'.format(skip)) f.write('\n') f.write('def setup(data):\n') if steps['setup']: @@ -271,6 +275,10 @@ def steps(self): steps['teardown'] = [] return steps + @property + def skip(self): + return getattr(self.get_module(), 'skip', False) + @property def components(self): """Parse and return the components of a Test in @@ -287,6 +295,7 @@ def components(self): 'description': self.description, 'pages': self.pages, 'tags': self.tags, - 'steps': self.steps + 'steps': self.steps, + 'skip': self.skip } return components diff --git a/golem/gui/api.py b/golem/gui/api.py index db7a2e4d..ef13c1e2 100644 --- a/golem/gui/api.py +++ b/golem/gui/api.py @@ -644,9 +644,10 @@ def test_save(): test_data_content = request.json['testData'] test_steps = request.json['steps'] tags = request.json['tags'] + skip = request.json['skip'] _verify_permissions(Permissions.STANDARD, project) test_module.edit_test(project, test_name, description, pages, test_steps, - test_data_content, tags) + test_data_content, tags, skip) return jsonify('test-saved') diff --git a/golem/gui/static/js/main.js b/golem/gui/static/js/main.js index ace61fef..6b84b1b8 100644 --- a/golem/gui/static/js/main.js +++ b/golem/gui/static/js/main.js @@ -655,10 +655,6 @@ const Main = new function(){ 'skipped': { code: 'skipped', color: '#ced4da' - }, - 'not run': { - code: 'skipped', - color: '#868e96' } } diff --git a/golem/gui/static/js/test_case.js b/golem/gui/static/js/test_case.js index f20b6802..9c4ba342 100644 --- a/golem/gui/static/js/test_case.js +++ b/golem/gui/static/js/test_case.js @@ -281,6 +281,14 @@ var Test = new function(){ } }); } + let skip = ($("#skipCheckbox").prop('checked')); + if(skip){ + let reason = $("#skipReason").val().trim() + if(reason.length){ + skip = reason + } + } + let data = { 'description': description, 'pages': pageObjects, @@ -289,7 +297,9 @@ var Test = new function(){ 'project': Test.project, 'testName': Test.fullName, 'tags': tags, + 'skip': skip } + $.ajax({ url: "/api/test/save", data: JSON.stringify(data), @@ -643,5 +653,13 @@ var Test = new function(){ if(section == 'test') return $("#testSteps .steps") if(section == 'teardown') return $("#teardownSteps .steps") } + + this.onSkipCheckboxChange = function(){ + if($("#skipCheckbox").prop('checked')) { + $("#skipReason").show(); + } else { + $("#skipReason").hide(); + } + } } } diff --git a/golem/gui/templates/test_builder/test_case.html b/golem/gui/templates/test_builder/test_case.html index e4664264..9e10b7f8 100644 --- a/golem/gui/templates/test_builder/test_case.html +++ b/golem/gui/templates/test_builder/test_case.html @@ -39,13 +39,27 @@
[test_case.html hunk: the HTML markup was stripped during extraction. What survives shows the Description and Tags form groups as context, plus a new "Skip" form group added before the Pages section, holding the #skipCheckbox checkbox and the #skipReason text input that test_case.js wires up above.]
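For context before the runner changes below, this is roughly what a saved test module looks like once the flag is set. It is a minimal sketch assembled from the docs and `edit_test` changes above; the description and skip reason are made-up placeholders:

```python
description = 'checkout flow'

tags = []

pages = []

# skip accepts a boolean or a string; a string doubles as the skip message.
# The flag only takes effect when the test runs as part of a suite.
skip = 'payment sandbox is down'


def setup(data):
    pass


def test(data):
    pass


def teardown(data):
    pass
```

When this module runs from a suite, `TestRunner.import_modules` (changed below) reads the attribute with `getattr(self.test_module, 'skip', False)`, logs the skip message, marks the result as `skipped`, and finalizes before `setup` is called; run on its own, the flag is ignored.
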
diff --git a/golem/test_runner/conf.py b/golem/test_runner/conf.py index d021d34c..bc5e5d01 100644 --- a/golem/test_runner/conf.py +++ b/golem/test_runner/conf.py @@ -9,3 +9,4 @@ class ResultsEnum: SUCCESS = 'success' STOPPED = 'stopped' NOT_RUN = 'not run' + SKIPPED = 'skipped' diff --git a/golem/test_runner/execution_runner.py b/golem/test_runner/execution_runner.py index 742722b0..6b685612 100644 --- a/golem/test_runner/execution_runner.py +++ b/golem/test_runner/execution_runner.py @@ -394,12 +394,14 @@ def _execute(self): run_test(session.testdir, self.project, test.name, test.data_set, test.secrets, test.browser, session.settings, test.reportdir, - self.execution.has_failed_tests, self.execution.tags) + self.execution.has_failed_tests, + self.execution.tags, self.is_suite) else: # run tests using multiprocessing multiprocess_executor(self.project, self.execution.tests, self.execution.has_failed_tests, - self.execution.processes, self.execution.tags) + self.execution.processes, self.execution.tags, + self.is_suite) # run suite `after` function if self.suite.after: diff --git a/golem/test_runner/multiprocess_executor.py b/golem/test_runner/multiprocess_executor.py index 0f93d80b..8e3683fa 100644 --- a/golem/test_runner/multiprocess_executor.py +++ b/golem/test_runner/multiprocess_executor.py @@ -9,7 +9,7 @@ def multiprocess_executor(project, execution_list, has_failed_tests, processes=1, - tags=None): + tags=None, is_suite=False): """Runs a list of tests in parallel using multiprocessing""" pool = Pool(processes=processes, maxtasksperchild=1) results = [] @@ -23,7 +23,8 @@ def multiprocess_executor(project, execution_list, has_failed_tests, processes=1 session.settings, test.reportdir, has_failed_tests, - tags) + tags, + is_suite) apply_async = pool.apply_async(run_test, args=args) results.append(apply_async) map(ApplyResult.wait, results) diff --git a/golem/test_runner/test_runner.py b/golem/test_runner/test_runner.py index 8c7cda13..eeefb116 100644 --- a/golem/test_runner/test_runner.py +++ b/golem/test_runner/test_runner.py @@ -50,21 +50,25 @@ def _get_set_name(test_data): def run_test(testdir, project, test_name, test_data, secrets, browser, - settings, report_directory, execution_has_failed_tests=None, tags=None): + settings, report_directory, execution_has_failed_tests=None, + tags=None, from_suite=False): """Run a single test""" session.testdir = testdir runner = TestRunner(testdir, project, test_name, test_data, secrets, browser, - settings, report_directory, execution_has_failed_tests, tags) + settings, report_directory, execution_has_failed_tests, + tags, from_suite) runner.prepare() class TestRunner: + __test__ = False # ignore this class from Pytest def __init__(self, testdir, project, test_name, test_data, secrets, browser, - settings, report_directory, execution_has_failed_tests=None, tags=None): + settings, report_directory, execution_has_failed_tests=None, + tags=None, from_suite=False): self.result = { - 'result': '', + 'result': None, 'errors': [], 'description': '', 'steps': [], @@ -88,6 +92,7 @@ def __init__(self, testdir, project, test_name, test_data, secrets, browser, self.logger = None self.execution_has_failed_tests = execution_has_failed_tests self.execution_tags = tags or [] + self.from_suite = from_suite def prepare(self): self.result['set_name'] = _get_set_name(self.test_data) @@ -145,7 +150,18 @@ def import_modules(self): trcbk = traceback.format_exc() actions._add_error(message=message, description=trcbk) self.result['result'] = ResultsEnum.CODE_ERROR - 
if self.result['result'] == ResultsEnum.CODE_ERROR: + + # check for skip flag + # test is skipped only when run from a suite + skip = getattr(self.test_module, 'skip', False) + if skip and self.from_suite: + self.result['result'] = ResultsEnum.SKIPPED + msg = 'Skip: {}'.format(skip) if type(skip) is str else 'Skip' + execution.logger.info(msg) + + + + if self.result['result'] in [ResultsEnum.CODE_ERROR, ResultsEnum.SKIPPED]: self.finalize() else: self.run_setup() @@ -222,8 +238,9 @@ def finalize(self): if self.result['result'] not in [ResultsEnum.CODE_ERROR, ResultsEnum.FAILURE]: if execution.errors: self.result['result'] = ResultsEnum.ERROR - else: - self.result['result'] = ResultsEnum.SUCCESS + + if self.result['result'] is None: + self.result['result'] = ResultsEnum.SUCCESS execution.logger.info('Test Result: {}'.format(self.result['result'].upper())) self.result['description'] = execution.description diff --git a/tests/conftest.py b/tests/conftest.py index 8f636192..f81fe36e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -242,9 +242,11 @@ def create_random_suite(project): return suite_name @staticmethod - def create_random_page(project): + def create_random_page(project, code=None): page_name = TestUtils.random_string(10) page.create_page(project, page_name) + if code is not None: + page.edit_page_code(project, page_name, code) return page_name diff --git a/tests/core/test_test.py b/tests/core/test_test.py index ecaac4eb..fa5b1cbe 100644 --- a/tests/core/test_test.py +++ b/tests/core/test_test.py @@ -50,6 +50,8 @@ def teardown(data): """ +EMPTY_STEPS = {'setup': [], 'test': [], 'teardown': []} + class TestCreateTest: def test_create_test(self, project_session, test_utils): @@ -172,8 +174,8 @@ def test_duplicate_test_with_data_file(self, project_session, test_utils): class TestEditTest: - def test_edit_test_data_infile(self, project_session, test_utils): - _, project = project_session.activate() + def test_edit_test_data_infile(self, project_function, test_utils): + _, project = project_function.activate() test_name = test_utils.create_random_test(project) description = 'description' pages = ['page1', 'page2'] @@ -191,7 +193,6 @@ def test_edit_test_data_infile(self, project_session, test_utils): }] settings_manager.save_project_settings(project, '{"test_data": "infile"}') test_module.edit_test(project, test_name, description, pages, test_steps, data, []) - path = test_module.Test(project, test_name).path expected = ( '\n' 'description = \'description\'\n' @@ -215,11 +216,11 @@ def test_edit_test_data_infile(self, project_session, test_utils): '\n\n' 'def teardown(data):\n' ' pass\n') - with open(path) as f: + with open(Test(project, test_name).path) as f: assert f.read() == expected - def test_edit_test_data_csv(self, project_session, test_utils): - _, project = project_session.activate() + def test_edit_test_data_csv(self, project_function, test_utils): + _, project = project_function.activate() test_name = test_utils.create_random_test(project) description = 'description' pages = [] @@ -235,7 +236,6 @@ def test_edit_test_data_csv(self, project_session, test_utils): }] settings_manager.save_project_settings(project, '{"test_data": "csv"}') test_module.edit_test(project, test_name, description, pages, test_steps, data, []) - path = test_module.Test(project, test_name).path expected = ( '\n' 'description = \'description\'\n' @@ -252,7 +252,7 @@ def test_edit_test_data_csv(self, project_session, test_utils): '\n\n' 'def teardown(data):\n' ' pass\n') - with open(path) as f: + 
with open(Test(project, test_name).path) as f: assert f.read() == expected data_path = os.path.join(Project(project).test_directory_path, '{}.csv'.format(test_name)) @@ -261,15 +261,13 @@ def test_edit_test_data_csv(self, project_session, test_utils): with open(data_path) as f: assert f.read() == expected - def test_edit_test_explicit_page_import(self, project_session, test_utils): - _, project = project_session.activate() + def test_edit_test_explicit_page_import(self, project_function, test_utils): + _, project = project_function.activate() test_name = test_utils.create_random_test(project) pages = ['page1', 'module.page2'] - steps = {'setup': [], 'test': [], 'teardown': []} settings_manager.save_project_settings(project, '{"implicit_page_import": false}') test_module.edit_test(project, test_name, description='', pages=pages, - steps=steps, test_data=[], tags=[]) - path = test_module.Test(project, test_name).path + steps=EMPTY_STEPS, test_data=[], tags=[]) expected = ('from projects.{}.pages import page1\n' 'from projects.{}.pages.module import page2\n' '\n\n' @@ -285,17 +283,15 @@ def test_edit_test_explicit_page_import(self, project_session, test_utils): '\n\n' 'def teardown(data):\n' ' pass\n'.format(project, project)) - with open(path) as f: + with open(Test(project, test_name).path) as f: assert f.read() == expected - def test_edit_test_explicit_action_import(self, project_session, test_utils): - _, project = project_session.activate() + def test_edit_test_explicit_action_import(self, project_function, test_utils): + _, project = project_function.activate() test_name = test_utils.create_random_test(project) - steps = {'setup': [], 'test': [], 'teardown': []} settings_manager.save_project_settings(project, '{"implicit_actions_import": false}') test_module.edit_test(project, test_name, description='', pages=[], - steps=steps, test_data=[], tags=[]) - path = test_module.Test(project, test_name).path + steps=EMPTY_STEPS, test_data=[], tags=[]) expected = ('from golem import actions\n\n\n' 'description = \'\'\n\n' 'tags = []\n\n' @@ -306,6 +302,44 @@ def test_edit_test_explicit_action_import(self, project_session, test_utils): ' pass\n\n\n' 'def teardown(data):\n' ' pass\n') + with open(Test(project, test_name).path) as f: + assert f.read() == expected + + def test_edit_test_skip(self, project_session, test_utils): + _, project = project_session.activate() + test_name = test_utils.create_random_test(project) + test_module.edit_test(project, test_name, description='', pages=[], + steps=EMPTY_STEPS, test_data=[], tags=[], skip=True) + path = Test(project, test_name).path + expected = ('\n' + 'description = \'\'\n\n' + 'tags = []\n\n' + 'pages = []\n\n' + 'skip = True\n\n\n' + 'def setup(data):\n' + ' pass\n\n\n' + 'def test(data):\n' + ' pass\n\n\n' + 'def teardown(data):\n' + ' pass\n') + with open(path) as f: + assert f.read() == expected + # skip is string + test_module.edit_test(project, test_name, description='', pages=[], + steps=EMPTY_STEPS, test_data=[], tags=[], + skip='please skip this') + path = Test(project, test_name).path + expected = ('\n' + 'description = \'\'\n\n' + 'tags = []\n\n' + 'pages = []\n\n' + 'skip = \'please skip this\'\n\n\n' + 'def setup(data):\n' + ' pass\n\n\n' + 'def test(data):\n' + ' pass\n\n\n' + 'def teardown(data):\n' + ' pass\n') with open(path) as f: assert f.read() == expected @@ -392,6 +426,8 @@ def test_test_components(self, project_session, test_utils): components = test.components assert components['description'] == 'some description' assert 
components['pages'] == ['page1', 'page2'] + assert components['tags'] == [] + assert components['skip'] is False assert components['steps']['setup'] == [{'code': 'page1.func1()', 'function_name': 'page1.func1', 'parameters': [], @@ -439,3 +475,17 @@ def test_test_components_pages(self, project_session, test_utils): components = Test(project, test_name).components expected = ['page1', 'page2', 'module.page3', 'page4', 'module2.page5'] assert components['pages'].sort() == expected.sort() + + def test_test_components_skip(self, project_session, test_utils): + _, project = project_session.activate() + test_name = test_utils.create_random_test(project) + # default / empty skip is False + assert Test(project, test_name).components['skip'] is False + # skip is True + test_module.edit_test(project, test_name, description='', pages=[], + steps=EMPTY_STEPS, test_data=[], tags=[], skip=True) + assert Test(project, test_name).components['skip'] is True + # skip is string + test_module.edit_test(project, test_name, description='', pages=[], + steps=EMPTY_STEPS, test_data=[], tags=[], skip='please skip') + assert Test(project, test_name).components['skip'] == 'please skip' diff --git a/tests/test_runner/test_runner_test.py b/tests/test_runner/test_runner_test.py index 257fc150..0a8e9460 100644 --- a/tests/test_runner/test_runner_test.py +++ b/tests/test_runner/test_runner_test.py @@ -1,11 +1,21 @@ import os import json +from types import SimpleNamespace import pytest from golem.test_runner import test_runner, execution_runner +from golem.test_runner.conf import ResultsEnum from golem.gui import gui_utils from golem.core import settings_manager +from golem.core import test as test_module + + +SUCCESS_MESSAGE = 'Test Result: SUCCESS' +CODE_ERROR_MESSAGE = 'Test Result: CODE ERROR' +FAILURE_MESSAGE = 'Test Result: FAILURE' +ERROR_MESSAGE = 'Test Result: ERROR' +SKIPPED_MESSAGE = 'Test Result: SKIPPED' def _define_browsers_mock(selected_browsers): @@ -65,16 +75,6 @@ def test___get_set_name__empty_data(self): class TestRunTest: - def _create_test(self, testdir, project, name, content): - path = os.path.join(testdir, 'projects', project, 'tests', name + '.py') - with open(path, 'w+') as f: - f.write(content) - - def _create_page(self, testdir, project, name, content): - path = os.path.join(testdir, 'projects', project, 'pages', name + '.py') - with open(path, 'w+') as f: - f.write(content) - def _mock_report_directory(self, testdir, project, test_name): path = os.path.join(testdir, 'projects', project, 'reports', 'single_tests', test_name, '00001') @@ -86,12 +86,116 @@ def _read_report_json(self, report_directory): with open(report_path) as f: return json.load(f) + @pytest.fixture(scope="function") + def runfix(self, project_class, test_utils): + """A fixture that + Uses a project fix with class scope, + Creates a random test + Creates a report directory for a future execution + Gets the settings and browser values required to run test + Can run the test provided the test code + Can read the json report + """ + testdir, project = project_class.activate() + test_name = test_utils.create_random_test(project) + report_directory = self._mock_report_directory(testdir, project, test_name) + settings = settings_manager.get_project_settings(project) + browser = _define_browsers_mock(['chrome'])[0] + + def run_test(code, test_data={}, secrets={}, from_suite=False): + test_module.edit_test_code(project, test_name, code, []) + test_runner.run_test(testdir, project, test_name, test_data, secrets, + browser, settings, 
report_directory, from_suite=from_suite) + + def read_report(): + return self._read_report_json(report_directory) + + fix = SimpleNamespace(testdir=testdir, project=project, test_name=test_name, + report_directory=report_directory, settings=settings, + browser=browser, run_test=run_test, read_report=read_report) + return fix + + # A0 + def test_run_test__import_error_on_test(self, runfix, caplog): + """The test fails with 'code error' when it has a syntax error + Test result is code error""" + code = """ +description = 'some description' + +# missing colon +def test(data) +step('this step wont be run') +""" + runfix.run_test(code) + # verify console logs + records = caplog.records + assert records[0].message == 'Test execution started: {}'.format(runfix.test_name) + assert records[1].message == 'Browser: chrome' + assert records[2].levelname == 'ERROR' + error_contains = 'def test(data)\n ^\nSyntaxError: invalid syntax' + assert error_contains in records[2].message + assert records[3].message == CODE_ERROR_MESSAGE + # verify report.json + report = runfix.read_report() + assert report['browser'] == 'chrome' + assert report['description'] is None # description could not be read + assert report['environment'] == '' + assert len(report['errors']) == 1 + assert report['errors'][0]['message'] == 'SyntaxError: invalid syntax' + assert error_contains in report['errors'][0]['description'] + assert report['result'] == ResultsEnum.CODE_ERROR + assert report['set_name'] == '' + assert report['steps'] == [] + assert report['test_case'] == runfix.test_name + assert report['test_data'] == {} + + # A1 + def test_run_test__import_error_page(self, runfix, caplog, test_utils): + """The test fails with 'code error' when an imported page has a syntax error""" + page_code = """ +element1 = ('id', 'someId' +element2 = ('css', '.oh.no') +""" + page_name = test_utils.create_random_page(runfix.project, code=page_code) + code = """ +pages = ['{}'] +def setup(data): + step('this step wont be run') +def test(data): + step('this step wont be run') +def teardown(data): + step('this step wont be run') +""".format(page_name) + runfix.run_test(code) + # verify console logs + records = caplog.records + assert records[0].message == 'Test execution started: {}'.format(runfix.test_name) + assert records[1].message == 'Browser: chrome' + assert records[2].levelname == 'ERROR' + error_contains = "element2 = ('css', '.oh.no')\n ^\nSyntaxError: invalid syntax" + assert error_contains in records[2].message + assert records[3].message == CODE_ERROR_MESSAGE + # verify report.json + report = runfix.read_report() + assert report['browser'] == 'chrome' + assert report['description'] is None # description could not be read + assert report['environment'] == '' + assert len(report['errors']) == 1 + assert 'SyntaxError: invalid syntax' in report['errors'][0]['message'] + assert error_contains in report['errors'][0]['description'] + assert report['result'] == ResultsEnum.CODE_ERROR + assert report['set_name'] == '' + assert report['steps'] == [] + assert report['test_case'] == runfix.test_name + assert report['test_data'] == {} + assert 'test_elapsed_time' in report + assert 'test_timestamp' in report + assert len(report.keys()) == 11 + # A2 - def test_run_test__success2(self, project_function_clean, caplog, test_utils): + def test_run_test__success(self, runfix, caplog): """Test runs successfully""" - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ 
description = 'some description' def setup(data): @@ -103,36 +207,29 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - # run test - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[0].message == 'Test execution started: {}'.format(test_name) + assert records[0].message == 'Test execution started: {}'.format(runfix.test_name) assert records[1].message == 'Browser: chrome' assert records[2].message == 'setup step' assert records[3].message == 'test step' assert records[4].message == 'teardown step' - assert records[5].message == 'Test Result: SUCCESS' + assert records[5].message == SUCCESS_MESSAGE # verify report.json - report = self._read_report_json(report_directory) + report = runfix.read_report() assert report['browser'] == 'chrome' assert report['description'] == 'some description' assert report['environment'] == '' assert report['errors'] == [] - assert report['result'] == 'success' + assert report['result'] == ResultsEnum.SUCCESS assert report['set_name'] == '' assert report['steps'] == [ {'message': 'setup step', 'screenshot': None, 'error': None}, {'message': 'test step', 'screenshot': None, 'error': None}, {'message': 'teardown step', 'screenshot': None, 'error': None}, ] - assert report['test_case'] == test_name + assert report['test_case'] == runfix.test_name assert report['test_data'] == {} assert 'test_elapsed_time' in report assert 'test_timestamp' in report @@ -140,11 +237,9 @@ def teardown(data): # A2 @pytest.mark.slow - def test_run_test__success_with_data(self, project_function_clean, caplog, test_utils): + def test_run_test__success_with_data(self, runfix, caplog): """Test runs successfully with test data""" - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'some description' def setup(data): @@ -156,21 +251,12 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, - test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] test_data = dict(username='username1', password='password1') secrets = dict(very='secret') - # run test - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data=test_data, secrets=secrets, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code, test_data=test_data, secrets=secrets) # verify console logs records = caplog.records - assert records[0].message == 'Test execution started: {}'.format( - test_name) + assert records[0].message == 'Test execution started: {}'.format(runfix.test_name) assert records[1].message == 'Browser: chrome' # Python 3.4 results not in order TODO value_a = 'Using data:\n username: username1\n password: password1' @@ -179,14 +265,14 @@ def teardown(data): assert records[3].message == 'setup step' assert records[4].message == 'test step' assert records[5].message == 'teardown step' - assert 
records[6].message == 'Test Result: SUCCESS' + assert records[6].message == SUCCESS_MESSAGE # verify report.json - report = self._read_report_json(report_directory) + report = runfix.read_report() assert report['browser'] == 'chrome' assert report['description'] == 'some description' assert report['environment'] == '' assert report['errors'] == [] - assert report['result'] == 'success' + assert report['result'] == ResultsEnum.SUCCESS # Python 3.4 TODO assert report['set_name'] in ['username1', 'password1'] assert report['steps'] == [ @@ -194,120 +280,19 @@ def teardown(data): {'message': 'test step', 'screenshot': None, 'error': None}, {'message': 'teardown step', 'screenshot': None, 'error': None}, ] - assert report['test_case'] == test_name + assert report['test_case'] == runfix.test_name assert report['test_data'] == {'username': "'username1'", 'password': "'password1'"} assert 'test_elapsed_time' in report assert 'test_timestamp' in report assert len(report.keys()) == 11 - # A0 - def test_run_test__import_error_on_test(self, project_function_clean, caplog, test_utils): - """The test fails with 'code error' when it has a syntax error - Test result is code error""" - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ -description = 'some description' - -# missing colon -def test(data) - step('this step wont be run') -""" - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, - test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) - # verify console logs - records = caplog.records - assert records[0].message == 'Test execution started: {}'.format( - test_name) - assert records[1].message == 'Browser: chrome' - assert records[2].levelname == 'ERROR' - error_contains = 'def test(data)\n ^\nSyntaxError: invalid syntax' - assert error_contains in records[2].message - assert records[3].message == 'Test Result: CODE ERROR' - # verify report.json - report = self._read_report_json(report_directory) - assert report['browser'] == 'chrome' - assert report['description'] is None # description could not be read - assert report['environment'] == '' - assert len(report['errors']) == 1 - assert report['errors'][0]['message'] == 'SyntaxError: invalid syntax' - assert error_contains in report['errors'][0]['description'] - assert report['result'] == 'code error' - assert report['set_name'] == '' - assert report['steps'] == [] - assert report['test_case'] == test_name - assert report['test_data'] == {} - - # A1 - def test_run_test__import_error_page(self, project_function_clean, caplog, test_utils): - """The test fails with 'code error' when an imported page has a syntax error""" - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ -pages = ['page1'] - -def setup(data): - step('this step wont be run') - -def test(data): - step('this step wont be run') - -def teardown(data): - step('this step wont be run') -""" - self._create_test(testdir, project, test_name, content) - page_content = """ -element1 = ('id', 'someId' -element2 = ('css', '.oh.no') -""" - self._create_page(testdir, project, 'page1', page_content) - report_directory = 
self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) - # verify console logs - records = caplog.records - assert records[0].message == 'Test execution started: {}'.format(test_name) - assert records[1].message == 'Browser: chrome' - assert records[2].levelname == 'ERROR' - error_contains = "element2 = ('css', '.oh.no')\n ^\nSyntaxError: invalid syntax" - assert error_contains in records[2].message - assert records[3].message == 'Test Result: CODE ERROR' - # verify report.json - report = self._read_report_json(report_directory) - assert report['browser'] == 'chrome' - assert report['description'] is None # description could not be read - assert report['environment'] == '' - assert len(report['errors']) == 1 - assert 'SyntaxError: invalid syntax' in report['errors'][0]['message'] - assert error_contains in report['errors'][0]['description'] - assert report['result'] == 'code error' - assert report['set_name'] == '' - assert report['steps'] == [] - assert report['test_case'] == test_name - assert report['test_data'] == {} - assert 'test_elapsed_time' in report - assert 'test_timestamp' in report - assert len(report.keys()) == 11 - # A3 - def test_run_test__AssertionError_in_setup(self, project_function_clean, - caplog, test_utils): + def test_run_test__AssertionError_in_setup(self, runfix, caplog): """The test ends with 'failure' when the setup function throws AssertionError. Test is not run Teardown is run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -319,44 +304,35 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[0].message == 'Test execution started: {}'.format(test_name) + assert records[0].message == 'Test execution started: {}'.format(runfix.test_name) assert records[1].message == 'Browser: chrome' assert records[2].levelname == 'ERROR' assert 'setup step fail' in records[2].message assert 'AssertionError: setup step fail' in records[2].message assert records[3].message == 'teardown step' - assert records[4].message == 'Test Result: FAILURE' + assert records[4].message == FAILURE_MESSAGE # verify report.json - report = self._read_report_json(report_directory) + report = runfix.read_report() assert report['description'] == 'desc' assert len(report['errors']) == 1 assert 'setup step fail' in report['errors'][0]['message'] - assert report['result'] == 'failure' + assert report['result'] == ResultsEnum.FAILURE assert report['steps'][0]['message'] == 'Failure' assert 'AssertionError: setup step fail' in report['steps'][0]['error']['description'] assert report['steps'][1]['message'] == 'teardown step' # A4 @pytest.mark.slow - def 
test_run_test__failure_and_error_in_setup(self, project_function_clean, - caplog, test_utils): + def test_run_test__failure_and_error_in_setup(self, runfix, caplog): """The test ends with 'failure' when the setup function throws AssertionError, even when there's an error in setup Test is not run Teardown is run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -369,35 +345,26 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: FAILURE' + assert records[5].message == FAILURE_MESSAGE # verify report.json - report = self._read_report_json(report_directory) + report = runfix.read_report() assert len(report['errors']) == 2 - assert report['result'] == 'failure' + assert report['result'] == ResultsEnum.FAILURE assert len(report['steps']) == 3 assert report['errors'][0]['message'] == 'error in setup' assert report['errors'][1]['message'] == 'AssertionError: setup step fail' # A5 - def test_run_test__failure_in_setup_error_in_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__failure_in_setup_error_in_teardown(self, runfix, caplog): """Setup throws AssertionError Teardown throws error Test ends with 'failure' test() is not run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -410,36 +377,27 @@ def teardown(data): step('teardown step') error('error in teardown') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: FAILURE' + assert records[5].message == FAILURE_MESSAGE # verify report.json - report = self._read_report_json(report_directory) + report = runfix.read_report() assert len(report['errors']) == 2 - assert report['result'] == 'failure' + assert report['result'] == ResultsEnum.FAILURE assert len(report['steps']) == 3 assert report['errors'][0]['message'] == 'AssertionError: setup step fail' assert report['errors'][1]['message'] == 'error in teardown' # A6 @pytest.mark.slow - def test_run_test__failure_in_setup_exception_in_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__failure_in_setup_exception_in_teardown(self, runfix, caplog): """Setup throws AssertionError Teardown throws AssertionError Test ends with 'failure' test() is not run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - 
content = """ + code = """ description = 'desc' def setup(data): @@ -452,36 +410,27 @@ def teardown(data): step('teardown step') foo = bar """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: FAILURE' + assert records[5].message == FAILURE_MESSAGE # verify report.json - report = self._read_report_json(report_directory) + report = runfix.read_report() assert len(report['errors']) == 2 - assert report['result'] == 'failure' + assert report['result'] == ResultsEnum.FAILURE assert len(report['steps']) == 3 assert report['errors'][0]['message'] == 'AssertionError: setup step fail' assert report['errors'][1]['message'] == "NameError: name 'bar' is not defined" # A8 @pytest.mark.slow - def test_run_test__failure_in_setup_failure_in_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__failure_in_setup_failure_in_teardown(self, runfix, caplog): """Setup throws AssertionError Teardown throws exception Test ends with 'failure' test() is not run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -493,35 +442,26 @@ def test(data): def teardown(data): fail('failure in teardown') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[4].message == 'Test Result: FAILURE' + assert records[4].message == FAILURE_MESSAGE # verify report.json - report = self._read_report_json(report_directory) + report = runfix.read_report() assert len(report['errors']) == 2 - assert report['result'] == 'failure' + assert report['result'] == ResultsEnum.FAILURE assert len(report['steps']) == 2 assert report['errors'][0]['message'] == 'AssertionError: setup step fail' assert report['errors'][1]['message'] == 'AssertionError: failure in teardown' # B0 - def test_run_test__exception_in_setup(self, project_function_clean, - caplog, test_utils): + def test_run_test__exception_in_setup(self, runfix, caplog): """Setup throws exception Test ends with 'code error' test() is not run teardown() is run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -533,34 +473,25 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, 
project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[4].message == 'Test Result: CODE ERROR' + assert records[4].message == CODE_ERROR_MESSAGE # verify report.json - report = self._read_report_json(report_directory) + report = runfix.read_report() assert len(report['errors']) == 1 - assert report['result'] == 'code error' + assert report['result'] == ResultsEnum.CODE_ERROR assert len(report['steps']) == 2 assert report['errors'][0]['message'] == "NameError: name 'bar' is not defined" # B1 - def test_run_test__exception_and_error_in_setup(self, project_function_clean, - caplog, test_utils): + def test_run_test__exception_and_error_in_setup(self, runfix, caplog): """Setup has error and throws exception Test ends with 'code error' test() is not run teardown() is run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -573,35 +504,26 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: CODE ERROR' + assert records[5].message == CODE_ERROR_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'code error' + report = runfix.read_report() + assert report['result'] == ResultsEnum.CODE_ERROR assert len(report['steps']) == 3 assert len(report['errors']) == 2 assert report['errors'][0]['message'] == 'setup error' assert report['errors'][1]['message'] == "NameError: name 'bar' is not defined" # B3 - def test_run_test__exception_in_setup_exception_in_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__exception_in_setup_exception_in_teardown(self, runfix, caplog): """Setup throws exception Teardown throws exception Test ends with 'code error' test() is not run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -613,35 +535,26 @@ def test(data): def teardown(data): foo = baz """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[4].message == 'Test Result: CODE ERROR' + assert records[4].message == CODE_ERROR_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'code error' + report = runfix.read_report() + assert report['result'] == 
ResultsEnum.CODE_ERROR assert len(report['steps']) == 2 assert len(report['errors']) == 2 assert report['errors'][0]['message'] == "NameError: name 'bar' is not defined" assert report['errors'][1]['message'] == "NameError: name 'baz' is not defined" # B5 - def test_run_test__exception_in_setup_failure_in_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__exception_in_setup_failure_in_teardown(self, runfix, caplog): """Setup throws exception Teardown throws AssertionError Test ends with 'code error' test() is not run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -653,33 +566,25 @@ def test(data): def teardown(data): fail('teardown failure') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[4].message == 'Test Result: CODE ERROR' + assert records[4].message == CODE_ERROR_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'code error' + report = runfix.read_report() + assert report['result'] == ResultsEnum.CODE_ERROR assert len(report['steps']) == 2 assert len(report['errors']) == 2 assert report['errors'][0]['message'] == "NameError: name 'bar' is not defined" assert report['errors'][1]['message'] == 'AssertionError: teardown failure' # B7 - def test_run_test__error_in_setup(self, project_function_clean, caplog, test_utils): + def test_run_test__error_in_setup(self, runfix, caplog): """Setup has error test() is run teardown() is run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -691,33 +596,24 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: ERROR' + assert records[5].message == ERROR_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'error' + report = runfix.read_report() + assert report['result'] == ResultsEnum.ERROR assert len(report['steps']) == 3 assert len(report['errors']) == 1 assert report['errors'][0]['message'] == "setup error" # B9 - def test_run_test__error_in_setup_exception_in_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__error_in_setup_exception_in_teardown(self, runfix, caplog): """Setup has error Teardown throws exception test() is run """ - testdir, project = project_function_clean.activate() - test_name = 
test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -729,34 +625,25 @@ def test(data): def teardown(data): foo = bar """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: CODE ERROR' + assert records[5].message == CODE_ERROR_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'code error' + report = runfix.read_report() + assert report['result'] == ResultsEnum.CODE_ERROR assert len(report['steps']) == 3 assert len(report['errors']) == 2 assert report['errors'][0]['message'] == 'setup error' assert report['errors'][1]['message'] == "NameError: name 'bar' is not defined" # C0 - def test_run_test__error_in_setup_failure_in_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__error_in_setup_failure_in_teardown(self, runfix, caplog): """Setup has error Teardown throws AssertionError test() is run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -768,32 +655,24 @@ def test(data): def teardown(data): fail('teardown fail') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: FAILURE' + assert records[5].message == FAILURE_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'failure' + report = runfix.read_report() + assert report['result'] == ResultsEnum.FAILURE assert len(report['steps']) == 3 assert len(report['errors']) == 2 assert report['errors'][0]['message'] == 'setup error' assert report['errors'][1]['message'] == 'AssertionError: teardown fail' # C1 - def test_run_test__failure_in_test(self, project_function_clean, caplog, test_utils): + def test_run_test__failure_in_test(self, runfix, caplog): """test() throws AssertionError teardown() is run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -806,32 +685,23 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - 
report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[6].message == 'Test Result: FAILURE' + assert records[6].message == FAILURE_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'failure' + report = runfix.read_report() + assert report['result'] == ResultsEnum.FAILURE assert len(report['steps']) == 4 assert len(report['errors']) == 1 assert report['errors'][0]['message'] == 'AssertionError: test fail' # C2 - def test_run_test__failure_and_error_in_test(self, project_function_clean, - caplog, test_utils): + def test_run_test__failure_and_error_in_test(self, runfix, caplog): """test() has error and throws AssertionError teardown() is run """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -844,33 +714,24 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[6].message == 'Test Result: FAILURE' + assert records[6].message == FAILURE_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'failure' + report = runfix.read_report() + assert report['result'] == ResultsEnum.FAILURE assert len(report['steps']) == 4 assert len(report['errors']) == 2 assert report['errors'][0]['message'] == 'test error' assert report['errors'][1]['message'] == 'AssertionError: test fail' # C5 - def test_run_test__failure_in_test_exception_in_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__failure_in_test_exception_in_teardown(self, runfix, caplog): """test() throws AssertionError teardown() throws exception """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -882,33 +743,24 @@ def test(data): def teardown(data): foo = bar """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: FAILURE' + assert records[5].message == FAILURE_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'failure' + report = runfix.read_report() + assert report['result'] == ResultsEnum.FAILURE assert len(report['steps']) == 3 assert len(report['errors']) == 2 assert report['errors'][0]['message'] == 'AssertionError: test fail' assert report['errors'][1]['message'] == "NameError: name 'bar' is not defined" # C7 - def 
test_run_test__failure_in_test_failure_in_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__failure_in_test_failure_in_teardown(self, runfix, caplog): """test() throws AssertionError teardown() throws AssertionError """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -920,30 +772,22 @@ def test(data): def teardown(data): fail('teardown fail') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: FAILURE' + assert records[5].message == FAILURE_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'failure' + report = runfix.read_report() + assert report['result'] == ResultsEnum.FAILURE assert len(report['steps']) == 3 assert len(report['errors']) == 2 assert report['errors'][0]['message'] == 'AssertionError: test fail' assert report['errors'][1]['message'] == 'AssertionError: teardown fail' # C8 - def test_run_test__exception_in_test(self, project_function_clean, caplog, test_utils): + def test_run_test__exception_in_test(self, runfix, caplog): """test() throws exception""" - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -955,33 +799,23 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: CODE ERROR' + assert records[5].message == CODE_ERROR_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'code error' + report = runfix.read_report() + assert report['result'] == ResultsEnum.CODE_ERROR assert len(report['steps']) == 3 assert len(report['errors']) == 1 assert report['errors'][0]['message'] == "NameError: name 'bar' is not defined" # C9 - def test_run_test__error_and_exception_in_test(self, project_function_clean, - caplog, test_utils): + def test_run_test__error_and_exception_in_test(self, runfix, caplog): """test() throws error and AssertionError teardown() """ - testdir, project = project_function_clean.activate() - project = project_function_clean.name - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -994,33 +828,24 @@ def test(data): def teardown(data): step('teardown step') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, 
project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[6].message == 'Test Result: CODE ERROR' + assert records[6].message == CODE_ERROR_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'code error' + report = runfix.read_report() + assert report['result'] == ResultsEnum.CODE_ERROR assert len(report['steps']) == 4 assert len(report['errors']) == 2 assert report['errors'][0]['message'] == 'error in test' assert report['errors'][1]['message'] == "NameError: name 'bar' is not defined" # D4 - def test_run_test__exception_in_test_failure_in_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__exception_in_test_failure_in_teardown(self, runfix, caplog): """test() throws exception teardown() throws AssertionError """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -1032,32 +857,23 @@ def test(data): def teardown(data): fail('teardown fail') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: CODE ERROR' + assert records[5].message == CODE_ERROR_MESSAGE # verify report.json - report = self._read_report_json(report_directory) - assert report['result'] == 'code error' + report = runfix.read_report() + assert report['result'] == ResultsEnum.CODE_ERROR assert len(report['steps']) == 3 assert len(report['errors']) == 2 assert report['errors'][0]['message'] == "NameError: name 'bar' is not defined" assert report['errors'][1]['message'] == 'AssertionError: teardown fail' # D7 - def test_run_test__error_in_setup_test_and_teardown(self, project_function_clean, - caplog, test_utils): + def test_run_test__error_in_setup_test_and_teardown(self, runfix, caplog): """setup(), test() and teardown() have errors """ - testdir, project = project_function_clean.activate() - test_name = test_utils.random_numeric_string(10) - content = """ + code = """ description = 'desc' def setup(data): @@ -1069,21 +885,90 @@ def test(data): def teardown(data): error('teardown error') """ - self._create_test(testdir, project, test_name, content) - report_directory = self._mock_report_directory(testdir, project, test_name) - settings = settings_manager.get_project_settings(project) - browser = _define_browsers_mock(['chrome'])[0] - test_runner.run_test(testdir=testdir, project=project, test_name=test_name, - test_data={}, secrets={}, browser=browser, settings=settings, - report_directory=report_directory) + runfix.run_test(code) # verify console logs records = caplog.records - assert records[5].message == 'Test Result: ERROR' + assert records[5].message == ERROR_MESSAGE # verify report.json - report = 
self._read_report_json(report_directory)
-        assert report['result'] == 'error'
+        report = runfix.read_report()
+        assert report['result'] == ResultsEnum.ERROR
         assert len(report['steps']) == 3
         assert len(report['errors']) == 3
         assert report['errors'][0]['message'] == 'setup error'
         assert report['errors'][1]['message'] == 'test error'
         assert report['errors'][2]['message'] == 'teardown error'
+
+    # TestRunner decision table: Skip is True
+    #
+    # CE : code error
+    # S : success
+    # F : failure
+    # SK : skip
+    #
+    #                           S0  S1  S2
+    # Skip is True              Y   Y   Y
+    # Import error test         N   N   Y
+    # Import error page         N   N   .
+    # Run from suite            N   Y   .
+    #
+    # result                    S   SK  CE
+    # setup is run              Y   N   N
+    # test is run               Y   N   N
+    # teardown is run           Y   N   N
+
+    # S0
+    def test_run_test__skip_true__not_from_suite(self, runfix, caplog):
+        code = ('skip = True\n'
+                'def setup(data):\n'
+                '    step("setup")\n'
+                'def test(data):\n'
+                '    step("test")\n'
+                'def teardown(data):\n'
+                '    step("teardown")')
+        runfix.run_test(code, from_suite=False)
+        # verify console logs
+        records = caplog.records
+        assert records[2].message == 'setup'
+        assert records[3].message == 'test'
+        assert records[4].message == 'teardown'
+        assert records[5].message == SUCCESS_MESSAGE
+        # verify report.json
+        report = runfix.read_report()
+        assert report['result'] == ResultsEnum.SUCCESS
+
+    # S1
+    def test_run_test__skip_true__from_suite(self, runfix, caplog):
+        code = ('skip = True\n'
+                'def setup(data):\n'
+                '    step("setup")\n'
+                'def test(data):\n'
+                '    step("test")\n'
+                'def teardown(data):\n'
+                '    step("teardown")')
+        runfix.run_test(code, from_suite=True)
+        # verify console logs
+        records = caplog.records
+        assert records[2].message == 'Skip'
+        assert records[3].message == SKIPPED_MESSAGE
+        # verify report.json
+        report = runfix.read_report()
+        assert report['result'] == ResultsEnum.SKIPPED
+
+    # S2
+    def test_run_test__skip_true__syntax_error(self, runfix, caplog):
+        """when a test with skip=True has an error on import,
+        the test ends with a code error
+        """
+        code = ('skip = True\n'
+                'def test(data)\n'
+                '    step("test")\n')
+        runfix.run_test(code, from_suite=True)
+        # verify console logs
+        records = caplog.records
+        assert records[2].levelname == 'ERROR'
+        assert records[3].message == CODE_ERROR_MESSAGE
+        # verify report.json
+        report = runfix.read_report()
+        assert len(report['errors']) == 1
+        assert report['errors'][0]['message'] == 'SyntaxError: invalid syntax'
+        assert report['result'] == ResultsEnum.CODE_ERROR
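
The skip semantics exercised by the new S0/S1/S2 tests can be restated as a few lines of
executable Python. The sketch below is illustrative only and uses assumed names
(resolve_result is a hypothetical helper, not part of golem.test_runner); the actual logic
lives in golem/test_runner/test_runner.py.

    # Illustrative sketch of the S0/S1/S2 decision table:
    # an import/syntax error always ends as a code error, and the skip flag
    # is honored only when the test is run from a suite.
    def resolve_result(skip, from_suite, import_error):
        if import_error:            # S2 -> code error
            return 'code error'
        if skip and from_suite:     # S1 -> skipped
            return 'skipped'
        return 'success'            # S0 -> setup/test/teardown run normally

    assert resolve_result(skip=True, from_suite=False, import_error=False) == 'success'    # S0
    assert resolve_result(skip=True, from_suite=True, import_error=False) == 'skipped'     # S1
    assert resolve_result(skip=True, from_suite=True, import_error=True) == 'code error'    # S2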