Report generated on 30-Aug-2022 at 00:28:56 by pytest-html v3.1.1
Packages | {"pluggy": "0.13.1", "py": "1.10.0", "pytest": "6.2.2"} |
Platform | Linux-5.15.0-46-generic-x86_64-with-glibc2.35 |
Plugins | {"html": "3.1.1", "metadata": "2.0.1", "testinfra": "5.0.0"} |
Python | 3.8.10 |
0 tests ran in 10.50 seconds.
0 passed, 0 skipped, 0 failed, 1 error, 0 expected failures, 0 unexpected passes

Result | Test | Duration | Links |
---|---|---|---|
Error | tests/end_to_end/test_basic_cases/test_emotet/test_emotet.py::test_emotet[emotet_attack]::setup | 10.39 | |
request = <SubRequest 'validate_environments' for <Function test_emotet[emotet_attack]>>

    @pytest.fixture(scope='session', autouse=True)
    def validate_environments(request):
        """Fixture with session scope to validate the environments before running the E2E tests.

        This phase is divided into 3 steps:
            Step 1: Collect the data related to the selected tests that will be executed.
            Step 2: Generate a playbook containing cross-checks for the selected tests.
            Step 3: Run the generated playbook.

        Args:
            request (fixture): Gives access to the requesting test context.
        """
        collected_items = request.session.items
        roles_path = request.config.getoption('--roles-path')
        inventory_path = request.config.getoption('--inventory_path')
        playbook_generator = os.path.join(suite_path, 'data', 'validation_playbooks', 'generate_general_play.yaml')
        playbook_template = os.path.join(suite_path, 'data', 'validation_templates', 'general_validation.j2')
        general_playbook = os.path.join(suite_path, 'data', 'validation_playbooks', 'general_validation.yaml')

        if not inventory_path:
            raise ValueError('Inventory not specified')

        # ------------------------------- Step 1: Prepare the necessary data -------------------------------
        test_suites_paths = []
        target_hosts = []
        target_distros = []

        # Get the path of the tests from collected items.
        collected_paths = [item.fspath for item in collected_items]
        # Remove duplicates caused by the existence of 2 or more test cases
        collected_paths = list(dict.fromkeys(collected_paths))

        for path in collected_paths:
            # Remove the name of the file from the path
            path = str(path).rsplit('/', 1)[0]
            # Add the test suite path
            test_suites_paths.append(path)
            # Get the test suite name
            test_suite_name = path.split('/')[-1:][0]
            # Set target hosts and distros
            target_hosts, target_distros = get_target_hosts_and_distros(test_suite_name, target_distros, target_hosts)
        # ------------------------------------------ End of Step 1 -----------------------------------------

        # ------------------ Step 2: Run the playbook to generate the general validation playbook ----------
        gen_parameters = {
            'playbook': playbook_generator,
            'inventory': inventory_path,
            'extravars': {
                'template_path': playbook_template,
                'dest_path': general_playbook,
                'target_hosts': ','.join(target_hosts),
                'distros': target_distros
            }
        }
        ansible_runner.run(**gen_parameters)
        # ------------------------------------------ End of Step 2 -----------------------------------------

        # -------------------------- Step 3: Run the general validation playbook ---------------------------
        parameters = {
            'playbook': general_playbook,
            'inventory': inventory_path,
            'envvars': {'ANSIBLE_ROLES_PATH': roles_path}
        }
        general_validation_runner = ansible_runner.run(**parameters)

        # Remove the generated playbook
        remove_file(general_playbook)

        # If the general validations have failed, then abort the execution, finishing with an error. Else, continue.
        if general_validation_runner.status == 'failed':
            # Collect inventory_hostnames with errors
            hosts_with_errors = [key for key in general_validation_runner.stats['failures']]
            # Collect list of errors
            errors = []
            errors.extend([general_validation_runner.get_fact_cache(host)['phase_results'] for host in hosts_with_errors])
            errors = ''.join(errors)

            # Raise the exception with errors details
>           raise Exception(f"The general validations have failed. Please check that the environments meet the expected "
                            f"requirements. Result:\n{errors}")
E           Exception: The general validations have failed. Please check that the environments meet the expected requirements. Result:

tests/end_to_end/conftest.py:111: Exception
----------------------------- Captured stdout setup -----------------------------

PLAY [localhost] ***************************************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [Generate a general validation playbook] **********************************
changed: [localhost]

PLAY RECAP *********************************************************************
localhost                  : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0

PLAY [General validation phase] ************************************************

TASK [Gathering Facts] *********************************************************
fatal: [centos-manager]: UNREACHABLE! => {"changed": false, "msg": "Failed to create temporary directory. In some cases, you may have been able to authenticate and did not have permissions on the target directory. Consider changing the remote tmp path in ansible.cfg to a path rooted in \"/tmp\", for more error information use -vvv. Failed command was: ( umask 77 && mkdir -p \"` echo ~/.ansible/tmp `\"&& mkdir \"` echo ~/.ansible/tmp/ansible-tmp-1661830132.1368904-53698-219197449241095 `\" && echo ansible-tmp-1661830132.1368904-53698-219197449241095=\"` echo ~/.ansible/tmp/ansible-tmp-1661830132.1368904-53698-219197449241095 `\" ), exited with result 1", "unreachable": true}
ok: [windows-agent]

NO MORE HOSTS LEFT *************************************************************

PLAY RECAP *********************************************************************
centos-manager             : ok=0    changed=0    unreachable=1    failed=0    skipped=0    rescued=0    ignored=0
windows-agent              : ok=1    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0
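Note on the failure above: the only unreachable host is centos-manager, and the Ansible error points at the remote temporary directory rather than at authentication. Assuming SSH access to that host is otherwise working, the message itself suggests moving Ansible's remote tmp path to a location rooted in /tmp. A minimal ansible.cfg sketch of that workaround is shown below; the exact path is an assumption, so pick one the connecting user can actually write to:

    [defaults]
    # Workaround suggested by the error message: use a remote tmp dir rooted in /tmp
    remote_tmp = /tmp/.ansible/tmp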
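For context, the validate_environments fixture shown in the traceback is session-scoped and autouse, so this single setup error is what prevented any test from running (hence "0 tests ran"). It reads the inventory and roles locations from the pytest command line (--inventory_path and --roles-path). A re-run after applying a fix like the one above would look roughly like the following sketch; both paths are placeholders, not the values used for this report:

    # Hypothetical re-run of the affected E2E suite once centos-manager is reachable
    python -m pytest tests/end_to_end/test_basic_cases/test_emotet \
        --inventory_path=/path/to/inventory.yaml \
        --roles-path=/path/to/roles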