diff --git a/Makefile b/Makefile index 6620589a4..16aa7889e 100644 --- a/Makefile +++ b/Makefile @@ -7,6 +7,8 @@ include Makefile.validation .PHONY: version test build +E2E_VM_SERVICE_NODE_IP := $(shell echo $(CLUSTER_IPS) | cut -d',' -f1) + # print version version: @printf $(TAG) @@ -134,3 +136,54 @@ generate-api: compile-proto generate-baremetal-crds generate-deepcopy generate-s # Used for UT. Need to regenerate after updating k8s API version generate-mocks: install-mockery mockery --dir=/usr/local/go/pkg/mod/k8s.io/client-go\@$(CLIENT_GO_VER)/kubernetes/typed/core/v1/ --name=EventInterface --output=pkg/events/mocks + + +run-csi-baremetal-functional-tests: + @echo "Configuring functional tests for csi-baremetal..."; \ + edited_list=$$(echo ${CLUSTER_IPS} | sed 's/, /", "/g; s/^/"/; s/$$/"/'); \ + echo "edited_list: $$edited_list"; \ + sed -i '/parser.addoption("--login", action="store", default=""/s/default=""/default="${USERNAME}"/' ${PROJECT_DIR}/tests/e2e-test-framework/conftest.py; \ + sed -i '/parser.addoption("--password", action="store", default=""/s/default=""/default="${PASSWORD}"/' ${PROJECT_DIR}/tests/e2e-test-framework/conftest.py; \ + sed -i '/parser.addoption("--hosts", action="store", default=\[\], help="Hosts")/s/default=\[\],/default=\['"$$edited_list"'\],/' ${PROJECT_DIR}/tests/e2e-test-framework/conftest.py; \ + sed -i '/parser.addoption("--qtest_token", action="store", default=""/s/default=""/default="${QTEST_API_KEY}"/' ${PROJECT_DIR}/tests/e2e-test-framework/conftest.py; \ + sed -i '/parser.addoption("--qtest_test_suite", action="store", default=""/s/default=""/default="${QTEST_SUITE_ID}"/' ${PROJECT_DIR}/tests/e2e-test-framework/conftest.py; \ + sed -i '/parser.addoption("--ansible_server", action="store", default=""/s/default="",/default="${ANSIBLE_SERVER_IP}",/' ${PROJECT_DIR}/tests/e2e-test-framework/conftest.py; \ + if [ "${REGRESSION_JOB_ENABLE}" = "true" ] && [ "${SKIP_UPGRADE}" = "false" ]; then \ + sed -i 
'/parser.addoption("--cmo_bundle_version", action="store", default=""/s/default=""/default="${BUNDLE_VERSION}"/' ${PROJECT_DIR}/tests/e2e-test-framework/conftest.py; \ + fi; \ + echo "conftest.py:"; \ + cat ${PROJECT_DIR}/tests/e2e-test-framework/conftest.py; \ + echo "Copying test files to remote server..."; \ + sshpass -p '${PASSWORD}' ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${USERNAME}@${E2E_VM_SERVICE_NODE_IP} "mkdir -p /root/tests/e2e"; \ + sshpass -p '${PASSWORD}' scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -r ${PROJECT_DIR}/tests/e2e-test-framework ${USERNAME}@${E2E_VM_SERVICE_NODE_IP}:/root/tests/; \ + echo "Installing dependencies and running tests on remote server..."; \ + sshpass -p '${PASSWORD}' ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${USERNAME}@${E2E_VM_SERVICE_NODE_IP} '. /root/venv/python3.12.2/bin/activate && cd /root/tests/e2e-test-framework && pip3 install -r requirements.txt && pytest -m hal --junitxml=test_results_csi_baremetal.xml ${TEST_FILTER_SMART_INFO}'; \ + TEST_EXIT_CODE=$$?; \ + echo "Test exit code: $$TEST_EXIT_CODE"; \ + echo "Copying test results back to local machine..."; \ + sshpass -p '${PASSWORD}' scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -r ${USERNAME}@${E2E_VM_SERVICE_NODE_IP}:/root/tests/e2e-test-framework/test_results_csi_baremetal.xml ${PROJECT_DIR}/test_results_csi_baremetal.xml; \ + if [ -e "${PROJECT_DIR}/test_results_csi_baremetal.xml" ]; then \ + echo "Test results for csi-baremetal copied successfully."; \ + else \ + echo "Error: Failed to copy test results for csi-baremetal."; \ + fi; \ + if [ $$TEST_EXIT_CODE -eq 0 ]; then \ + echo "All tests for csi-baremetal passed successfully."; \ + echo "SUCCESS" > build_status.txt; \ + else \ + echo "Functional tests for csi-baremetal failed."; \ + echo "FAILURE" > build_status.txt; \ + fi + +#cleanup test files on remote server +functional-tests-cleanup: + @echo "Cleaning up functional 
test files on remote server..."; \ + sshpass -p '${PASSWORD}' ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${USERNAME}@${E2E_VM_SERVICE_NODE_IP} 'rm -rf /root/tests/*'; \ + echo "Functional test cleanup completed." + +.PHONY: csi-baremetal-functional-tests run-csi-baremetal-functional-tests functional-tests-cleanup +csi-baremetal-functional-tests: + @$(MAKE) functional-tests-cleanup + @$(MAKE) run-csi-baremetal-functional-tests + @$(MAKE) functional-tests-cleanup + @echo "Functional tests for csi-baremetal completed." \ No newline at end of file diff --git a/tests/e2e-test-framework/conftest.py b/tests/e2e-test-framework/conftest.py index f1ef01d06..5218bef18 100644 --- a/tests/e2e-test-framework/conftest.py +++ b/tests/e2e-test-framework/conftest.py @@ -1,15 +1,18 @@ import logging from datetime import datetime import pytest -from framework.description_plugin import DescriptionPlugin +from framework.test_description_plugin import TestDescriptionPlugin +from wiremock.testing.testcontainer import wiremock_container +from wiremock.constants import Config from framework.qtest_helper import QTestHelper -from framework.propagating_thread import PropagatingThread +from framework.docker_helper import Docker import re +from framework.propagating_thread import PropagatingThread @pytest.hookimpl(trylast=True) def pytest_configure(config): terminal_reporter = config.pluginmanager.getplugin('terminalreporter') - config.pluginmanager.register(DescriptionPlugin(terminal_reporter), 'testdescription') + config.pluginmanager.register(TestDescriptionPlugin(terminal_reporter), 'testdescription') # Configure log file logging log_file_suffix = '{:%Y_%m_%d_%H%M%S}.log'.format(datetime.now()) @@ -20,7 +24,7 @@ def pytest_configure(config): file_handler.setFormatter(file_formatter) logging.getLogger().addHandler(file_handler) - pytest.qtest_helper = 
QTestHelper(config.getoption("--qtest_token"), config.getoption("--cmo_bundle_version")) if config.getoption("--qtest_token") else None pytest.tests_in_suite = {} pytest.threads = [] @@ -29,8 +33,11 @@ def pytest_addoption(parser): parser.addoption("--login", action="store", default="", help="Login") parser.addoption("--password", action="store", default="", help="Password") parser.addoption("--namespace", action="store", default="atlantic", help="Namespace") + parser.addoption("--hosts", action="store", default=[], help="Hosts") parser.addoption("--qtest_token", action="store", default="", help="qTest Token") + parser.addoption("--ansible_server", action="store", default="", help="Server") parser.addoption("--qtest_test_suite", action="store", default="", help="qTest Test Suite ID") + parser.addoption("--cmo_bundle_version", action="store", default="", help="Version of CMO bundle") def pytest_collection_modifyitems(config): qtest_token = config.getoption("--qtest_token") @@ -51,22 +58,16 @@ def pytest_sessionfinish(): if len(pytest.threads) == 0: return - suite_failed = False for thread in pytest.threads: - try: - thread.join() - except Exception: - suite_failed = True + thread.join() logging.info("[qTest] Summary") for thread in pytest.threads: if thread.has_failed(): logging.error(f"[qTest] {thread.test_name} {thread.get_target_name()} failed: {thread.exc}") - if not thread.has_failed(): + else: logging.info(f"[qTest] {thread.test_name} {thread.get_target_name()} success.") - assert not suite_failed, "One or more threads failed" - @pytest.fixture(scope="session") def vm_user(request): return request.config.getoption("--login") @@ -79,26 +80,52 @@ def vm_cred(request): def namespace(request): return request.config.getoption("--namespace") +@pytest.fixture(scope="session") +def hosts(request): + return request.config.getoption("--hosts") + +@pytest.fixture(scope="session") +def ansible_server(request): + return request.config.getoption("--ansible_server") + 
+@pytest.fixture(scope="session") +def wire_mock(): + if not Docker.is_docker_running(): + pytest.skip('Docker is not running. Please start docker.') + with wiremock_container(image="asdrepo.isus.emc.com:9042/wiremock:2.35.1-1", verify_ssl_certs=False) as wire_mock: + Config.base_url = wire_mock.get_url("__admin") + Config.requests_verify = False + yield wire_mock + @pytest.fixture(scope="function", autouse=True) def link_requirements_in_background(request): if pytest.qtest_helper is not None: requirements_thread = PropagatingThread(target=link_requirements, args=(request,), test_name=request.node.name) requirements_thread.start() pytest.threads.append(requirements_thread) - + def link_requirements(request): for marker in request.node.iter_markers(): - if marker.name == "requirements": - logging.info(f" [qTest] Test function {request.node.name} is associated with requirement: {marker.args}.") - test_case_pid, requirement_ids = marker.args - for requirement_id in requirement_ids: - pytest.qtest_helper.link_test_case_to_requirement(requirement_id, test_case_pid) - return + if marker.name == "requirements": + logging.info(f" [qTest] Test function {request.node.name} is associated with requirement: {marker.args}.") + test_case_pid, requirement_ids = marker.args + for requirement_id in requirement_ids: + pytest.qtest_helper.link_test_case_to_requirement(requirement_id, test_case_pid) + return logging.info(f"[qTest] Test function {request.node.name} is missing requirements marker.") @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item): report = (yield).get_result() + + if report.outcome == 'skipped' and pytest.qtest_helper is not None and item.config.getoption("--qtest_test_suite") != '': + update_thread = PropagatingThread(target=update_test_result, + args=(item.name, item.config.getoption("--qtest_test_suite"), report.outcome, datetime.now(), datetime.now()), + test_name=item.name) + update_thread.start() + 
pytest.threads.append(update_thread) + return + setattr(item, 'report', report) @@ -115,16 +142,17 @@ def update_test_results_in_background(request): yield test_end_date = datetime.now() + test_name = request.node.name + test_suite_id = request.config.getoption("--qtest_test_suite") + outcome = request.node.report.outcome + update_thread = PropagatingThread(target=update_test_result, - args=(request, test_start_date, test_end_date), + args=(test_name, test_suite_id, outcome, test_start_date, test_end_date), test_name=request.node.name) update_thread.start() pytest.threads.append(update_thread) -def update_test_result(request, test_start_date, test_end_date): - test_name = request.node.name - test_suite_id = request.config.getoption("--qtest_test_suite") - +def update_test_result(test_name, test_suite_id, outcome, test_start_date, test_end_date): match = re.match(r"test_(\d+)", test_name) if not match: @@ -136,11 +164,7 @@ def update_test_result(request, test_start_date, test_end_date): if test_case_id not in pytest.tests_in_suite: test_run_id = pytest.qtest_helper.add_test_run_to_test_suite(test_case_id, test_suite_id) - logging.info(f"[qTest] Added test case {test_case} to test suite {test_suite_id}") else: test_run_id = pytest.tests_in_suite[test_case_id] - pytest.qtest_helper.update_test_run_status_in_test_suite(test_run_id, request.node.report.outcome, test_start_date, test_end_date) - - logging.info(f"[qTest] Updated test run {test_run_id} with status {request.node.report.outcome}") - + pytest.qtest_helper.update_test_run_status_in_test_suite(test_run_id, outcome, test_start_date, test_end_date) \ No newline at end of file diff --git a/tests/e2e-test-framework/framework/docker_helper.py b/tests/e2e-test-framework/framework/docker_helper.py new file mode 100644 index 000000000..2343ffbcd --- /dev/null +++ b/tests/e2e-test-framework/framework/docker_helper.py @@ -0,0 +1,16 @@ +import logging +import docker + + +class Docker: + @classmethod + def 
is_docker_running(cls): + try: + client = docker.from_env() + client.ping() + + logging.info("\nDocker is running.") + return True + except Exception as exc: + logging.error(f"Error: {exc}") + return False diff --git a/tests/e2e-test-framework/framework/propagating_thread.py b/tests/e2e-test-framework/framework/propagating_thread.py index c0729297b..fa64190e6 100644 --- a/tests/e2e-test-framework/framework/propagating_thread.py +++ b/tests/e2e-test-framework/framework/propagating_thread.py @@ -28,8 +28,3 @@ def has_failed(self): def get_target_name(self): return self.target.__name__ - - def join(self, timeout=None): - super(PropagatingThread, self).join(timeout) - if self.exc is not None: - raise RuntimeError(f"{self.test_name} {self.exc}") diff --git a/tests/e2e-test-framework/framework/qtest_helper.py b/tests/e2e-test-framework/framework/qtest_helper.py index 7471df4dc..186232c15 100644 --- a/tests/e2e-test-framework/framework/qtest_helper.py +++ b/tests/e2e-test-framework/framework/qtest_helper.py @@ -1,108 +1,256 @@ import json +import logging import requests # Swagger # https://qtest.dev.tricentis.com/ -class QTestHelper(): - def __init__(self, qtest_token): +class QTestHelper: + def __init__(self, qtest_token, bundle_version=""): - self.api_base_url = "https://qtest.gtie.dell.com/api/v3" - self.project_id = 367 # CMO project + self.api_base_url = "https://qtest.gtie.dell.com/api/v3" + self.project_id = 367 # CMO project + self.bundle_version = bundle_version + self.default_timeout = 10 + self.max_retries = 2 # maps from pytest outcome to qTest status code - self.status_codes = { + self.status_codes = { "passed": 601, "failed": 602, - "skipped": 603 # 603 - incomplete + "skipped": 603, # 603 - incomplete } - self.access_token = qtest_token + self.access_token = qtest_token - self.headers = { - "Authorization": f"Bearer {self.access_token}", - "Content-Type": "application/json" + self.headers = { + "Authorization": f"Bearer {self.access_token}", + 
"Content-Type": "application/json", } - self.timeout = 10 - def link_test_case_to_requirement(self, jira_id, test_case_pid): + logging.info(f"[qTest] Link test case {test_case_pid} to requirement") requirement_id = self.get_requirement_id_by_jira_id(jira_id) req_link_endpoint = f"{self.api_base_url}/projects/{self.project_id}/requirements/{requirement_id}/link?type=test-cases" test_case_id = self.get_test_case_pid_by_id(test_case_pid) body = [test_case_id] - response = requests.post(req_link_endpoint, headers=self.headers, data=json.dumps(body), timeout=self.timeout) - response.raise_for_status() + exception = None + for _ in range(self.max_retries): + try: + response = requests.post( + req_link_endpoint, + headers=self.headers, + data=json.dumps(body), + timeout=self.default_timeout, + ) + response.raise_for_status() + logging.info( + f"[qTest] Test cases {test_case_pid} [{test_case_id}] linked to requirement {jira_id} [{requirement_id}] successfully." + ) + return + except requests.HTTPError as exc: + exception = exc + logging.warning( + f"[qTest] Failed to link test case {test_case_pid} to requirement {jira_id}. Retrying..." + ) - print(f"\tTest cases {test_case_pid} [{test_case_id}] linked to requirement {jira_id} [{requirement_id}] successfully.") + raise exception def get_test_case_pid_by_id(self, test_case_pid): + logging.info(f"[qTest] Get test case {test_case_pid} by id") test_cases_endpoint = f"{self.api_base_url}/projects/{self.project_id}/test-cases/{test_case_pid}" - response = requests.get(test_cases_endpoint, headers=self.headers, timeout=self.timeout) - response.raise_for_status() - return response.json()['id'] + exception = None + for _ in range(self.max_retries): + try: + response = requests.get( + test_cases_endpoint, + headers=self.headers, + timeout=self.default_timeout, + ) + response.raise_for_status() + + logging.info( + f"[qTest] Get test case {test_case_pid} by id completed." 
+ ) + return response.json()["id"] + except requests.HTTPError as exc: + exception = exc + logging.warning( + f"[qTest] Failed to get test case {test_case_pid} by id. Retrying..." + ) + + raise exception def get_requirement_id_by_jira_id(self, jira_id): - search_endpoint = f"{self.api_base_url}/projects/{self.project_id}/search" + logging.info(f"[qTest] Get requirement id by jira id {jira_id}") + search_endpoint = ( + f"{self.api_base_url}/projects/{self.project_id}/search" + ) payload = { "object_type": "requirements", - "fields": [ - "id" - ], - "query": f"'name' ~ '{jira_id}'" + "fields": ["id"], + "query": f"'name' ~ '{jira_id}'", } - response = requests.post(search_endpoint, headers=self.headers, json=payload, timeout=self.timeout) - response.raise_for_status() + exception = None + for _ in range(self.max_retries): + try: + response = requests.post( + search_endpoint, + headers=self.headers, + json=payload, + timeout=self.default_timeout, + ) + response.raise_for_status() + + logging.info( + f"[qTest] Get requirement id by jira id {jira_id} completed." + ) + return response.json()["items"][0]["id"] + except requests.HTTPError as exc: + exception = exc + logging.warning( + f"[qTest] Failed to get requirement id by jira id {jira_id}. Retrying..." 
+ ) - return response.json()['items'][0]['id'] + raise exception def get_tests_from_suite(self, test_suite_id): + logging.info(f"[qTest] Get tests from test suite {test_suite_id}") test_runs_endpoint = f"{self.api_base_url}/projects/{self.project_id}/test-runs/?parentId={test_suite_id}&parentType=test-suite" - response = requests.get(test_runs_endpoint, headers=self.headers, timeout=self.timeout) - response.raise_for_status() + exception = None + for _ in range(self.max_retries): + try: + response = requests.get( + test_runs_endpoint, + headers=self.headers, + timeout=self.default_timeout, + ) + response.raise_for_status() - tests_in_suite = {} - for test_run in response.json()['items']: - tests_in_suite[test_run['testCaseId']] = test_run['id'] + tests_in_suite = {} + for test_run in response.json()["items"]: + tests_in_suite[test_run["testCaseId"]] = test_run["id"] - return tests_in_suite + logging.info( + f"[qTest] Get tests from test suite {test_suite_id} completed." + ) + return tests_in_suite + except requests.HTTPError as exc: + exception = exc + logging.warning( + f"[qTest] Failed to get tests from test suite {test_suite_id}. Retrying..." 
+ ) + + raise exception def add_test_run_to_test_suite(self, test_case_pid, test_suite_id): + logging.info( + f"[qTest] Add test case {test_case_pid} to test suite {test_suite_id}" + ) get_test_case_endpoint = f"{self.api_base_url}/projects/{self.project_id}/test-cases/{test_case_pid}" - response = requests.get(get_test_case_endpoint, headers=self.headers, timeout=self.timeout) - response.raise_for_status() + exception = None + for _ in range(self.max_retries): + try: + response = requests.get( + get_test_case_endpoint, + headers=self.headers, + timeout=self.default_timeout, + ) + response.raise_for_status() + + logging.info( + f"[qTest] Test case {test_case_pid} added to test suite {test_suite_id}" + ) + break + except requests.HTTPError as exc: + exception = exc + logging.warning( + f"[qTest] Failed to add test case {test_case_pid} to test suite {test_suite_id}. Retrying..." + ) + + else: + raise exception add_test_case_to_test_suite_endpoint = f"{self.api_base_url}/projects/{self.project_id}/test-runs?parentId={test_suite_id}&parentType=test-suite" payload = { - "name": response.json()['name'], + "name": response.json()["name"], "test_case": { "id": test_case_pid, - } + }, } - response = requests.post(add_test_case_to_test_suite_endpoint, headers=self.headers, data=json.dumps(payload), timeout=self.timeout) - response.raise_for_status() - return response.json()['id'] + for _ in range(self.max_retries): + try: + response = requests.post( + add_test_case_to_test_suite_endpoint, + headers=self.headers, + data=json.dumps(payload), + timeout=self.default_timeout, + ) + response.raise_for_status() + logging.info( + f"[qTest] Test case {test_case_pid} added to test suite {test_suite_id}" + ) + return response.json()["id"] + except requests.HTTPError as exc: + exception = exc + logging.warning( + f"[qTest] Failed to add test case {test_case_pid} to test suite {test_suite_id}. Retrying..." 
+ ) + + raise exception - def update_test_run_status_in_test_suite(self, test_run_id, status, test_start_date, test_end_date): + def update_test_run_status_in_test_suite( + self, test_run_id, status, test_start_date, test_end_date + ): + logging.info( + f"[qTest] Update test run {test_run_id} with status {status}" + ) update_test_logs_endpoint = f"{self.api_base_url}/projects/{self.project_id}/test-runs/{test_run_id}/test-logs" payload = { - "exe_start_date": test_start_date.isoformat().split(".")[0] + "+00:00", + "exe_start_date": test_start_date.isoformat().split(".")[0] + + "+00:00", "exe_end_date": test_end_date.isoformat().split(".")[0] + "+00:00", - "status": { - "id": self.status_codes[status] - }, + "status": {"id": self.status_codes[status]}, } - response = requests.post(update_test_logs_endpoint, headers=self.headers, data=json.dumps(payload), timeout=self.timeout) - response.raise_for_status() + if self.bundle_version != "": + payload["properties"] = [ + { + "field_id": 103442, # bundle version + "field_value": self.bundle_version, + } + ] + + exception = None + for _ in range(self.max_retries): + try: + response = requests.post( + update_test_logs_endpoint, + headers=self.headers, + data=json.dumps(payload), + timeout=self.default_timeout, + ) + response.raise_for_status() + + logging.info( + f"[qTest] Test run {test_run_id} status updated successfully." + ) + return + except requests.HTTPError as exc: + exception = exc + logging.warning( + f"[qTest] Failed to update test run {test_run_id} status. Retrying..." 
+ ) + + raise exception diff --git a/tests/e2e-test-framework/framework/description_plugin.py b/tests/e2e-test-framework/framework/test_description_plugin.py similarity index 95% rename from tests/e2e-test-framework/framework/description_plugin.py rename to tests/e2e-test-framework/framework/test_description_plugin.py index 4a69c43b5..9020013bd 100644 --- a/tests/e2e-test-framework/framework/description_plugin.py +++ b/tests/e2e-test-framework/framework/test_description_plugin.py @@ -2,7 +2,7 @@ import pytest -class DescriptionPlugin: +class TestDescriptionPlugin: def __init__(self, terminal_reporter: str): self.terminal_reporter = terminal_reporter diff --git a/tests/e2e-test-framework/requirements.txt b/tests/e2e-test-framework/requirements.txt index 9ba119abd..977a381aa 100644 --- a/tests/e2e-test-framework/requirements.txt +++ b/tests/e2e-test-framework/requirements.txt @@ -1,7 +1,11 @@ -pytest==8.1.1 +pytest==8.2.1 pytest-html-reporter==0.2.9 kubernetes==29.0.0 tox==4.14.2 flake8==7.0.0 -pylint==3.1.0 +pylint==3.2.2 paramiko==3.4.0 +wiremock==2.6.1 +docker==7.1.0 +requests==2.32.2 +testcontainers==4.5.1