3166-T2-R1-e2e-mauromalara.html

Report generated on 29-Aug-2022 at 08:50:48 by pytest-html v3.1.1

Environment

Packages {"pluggy": "0.13.1", "py": "1.10.0", "pytest": "6.2.2"}
Platform Linux-5.15.0-46-generic-x86_64-with-glibc2.35
Plugins {"html": "3.1.1", "metadata": "2.0.1", "testinfra": "5.0.0"}
Python 3.8.10

Summary

0 tests ran in 38.54 seconds.

0 passed, 0 skipped, 0 failed, 1 error, 0 expected failures, 0 unexpected passes

Results

Result Test Duration Links
Error tests/end_to_end/test_basic_cases/test_brute_force/test_brute_force_rdp/test_brute_force_rdp.py::test_brute_force_rdp[rdp_brute_force]::setup 38.36
request = <SubRequest 'validate_environments' for <Function test_brute_force_rdp[rdp_brute_force]>>

@pytest.fixture(scope='session', autouse=True)
def validate_environments(request):
"""Fixture with session scope to validate the environments before run the E2E tests.

This phase is divided into 3 steps:
Step 1: Collect the data related to the selected tests that will be executed.
Step 2: Generate a playbook containing cross-checks for selected tests.
Step 3: Run the generated playbook.

Args:
request (fixture): Gives access to the requesting test context.
"""
collected_items = request.session.items
roles_path = request.config.getoption('--roles-path')
inventory_path = request.config.getoption('--inventory_path')
playbook_generator = os.path.join(suite_path, 'data', 'validation_playbooks', 'generate_general_play.yaml')
playbook_template = os.path.join(suite_path, 'data', 'validation_templates', 'general_validation.j2')
general_playbook = os.path.join(suite_path, 'data', 'validation_playbooks', 'general_validation.yaml')

if not inventory_path:
raise ValueError('Inventory not specified')

# --------------------------------------- Step 1: Prepare the necessary data ---------------------------------------
test_suites_paths = []
target_hosts = []
target_distros = []

# Get the path of the tests from collected items.
collected_paths = [item.fspath for item in collected_items]
# Remove duplicates caused by the existence of 2 or more test cases
collected_paths = list(dict.fromkeys(collected_paths))

for path in collected_paths:
# Remove the name of the file from the path
path = str(path).rsplit('/', 1)[0]
# Add the test suite path
test_suites_paths.append(path)
# Get the test suite name
test_suite_name = path.split('/')[-1:][0]
# Set target hosts and distros
target_hosts, target_distros = get_target_hosts_and_distros(test_suite_name, target_distros, target_hosts)
# -------------------------------------------------- End of Step 1 -------------------------------------------------

# ---------------------- Step 2: Run the playbook to generate the general validation playbook ----------------------
gen_parameters = {
'playbook': playbook_generator, 'inventory': inventory_path,
'extravars': {
'template_path': playbook_template,
'dest_path': general_playbook,
'target_hosts': ','.join(target_hosts),
'distros': target_distros
}
}
ansible_runner.run(**gen_parameters)
# -------------------------------------------------- End of Step 2 -------------------------------------------------

# ----------------------------------- Step 3: Run the general validation playbook ----------------------------------
parameters = {
'playbook': general_playbook,
'inventory': inventory_path,
'envvars': {'ANSIBLE_ROLES_PATH': roles_path}
}
general_validation_runner = ansible_runner.run(**parameters)
# Remove the generated playbook
remove_file(general_playbook)
# If the general validations have failed, abort the execution and finish with an error; otherwise, continue.
if general_validation_runner.status == 'failed':
# Collect inventory_hostnames with errors
hosts_with_errors = [key for key in general_validation_runner.stats['failures']]
# Collect list of errors
errors = []
errors.extend([general_validation_runner.get_fact_cache(host)['phase_results'] for host in hosts_with_errors])
errors = ''.join(errors)
# Raise the exception with errors details
> raise Exception(f"The general validations have failed. Please check that the environments meet the expected "
f"requirements. Result:\n{errors}")
E Exception: The general validations have failed. Please check that the environments meet the expected requirements. Result:

tests/end_to_end/conftest.py:111: Exception
-----------------------------Captured stdout setup------------------------------
PLAY [localhost] *************************************************************** TASK [Gathering Facts] ********************************************************* ok: [localhost] TASK [Generate a general validation playbook] ********************************** changed: [localhost] PLAY RECAP ********************************************************************* localhost : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 PLAY [General validation phase] ************************************************ TASK [Gathering Facts] ********************************************************* fatal: [centos-manager]: UNREACHABLE! => {"changed": false, "msg": "Failed to connect to the host via ssh: ssh: connect to host 172.31.3.228 port 22: Connection timed out", "unreachable": true} fatal: [windows-agent]: UNREACHABLE! => {"changed": false, "msg": "ssl: HTTPSConnectionPool(host='172.31.10.1', port=5986): Max retries exceeded with url: /wsman (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f817da69d60>, 'Connection to 172.31.10.1 timed out. (connect timeout=30)'))", "unreachable": true} NO MORE HOSTS LEFT ************************************************************* PLAY RECAP ********************************************************************* centos-manager : ok=0 changed=0 unreachable=1 failed=0 skipped=0 rescued=0 ignored=0 windows-agent : ok=0 changed=0 unreachable=1 failed=0 skipped=0 rescued=0 ignored=0