diff --git a/tests/conftest.py b/tests/conftest.py index 9f0bfa25..94fdd73e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,14 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 from collections.abc import Generator -from contextlib import contextmanager from pathlib import Path -from typing import Any, cast +from typing import Optional, cast import pytest import yaml -from sqlalchemy import Engine, create_engine -from sqlalchemy.orm import Session, sessionmaker from tdp.core.constants import ( DAG_DIRECTORY_NAME, @@ -18,13 +15,27 @@ PLAYBOOKS_DIRECTORY_NAME, YML_EXTENSION, ) -from tdp.core.models import BaseModel, init_database def pytest_addoption(parser: pytest.Parser) -> None: + """Add custom command-line options for pytest. + + This function adds the --database-dsn option that allows specifying multiple + database data source names (DSNs) for testing. The option can be used multiple + times to test against different database backends. The default value is "sqlite"; + it will always be present in the list of DSNs. + + Usage examples: + # Test only with sqlite (default behavior) + pytest tests + # The resulting list will be: ["sqlite"] + + # Test with sqlite and postgresql + pytest tests --database-dsn postgresql://user:pass@localhost/testdb + # The resulting list will be: ["sqlite", "postgresql://user:pass@localhost/testdb"] + """ parser.addoption( "--database-dsn", - dest="database_dsn", action="append", default=["sqlite"], help="Add database DSN.", @@ -32,24 +43,21 @@ def pytest_addoption(parser: pytest.Parser) -> None: def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: - """Pytest hook to generate tests based on the database dsn option.""" + """Pytest hook to generate tests based on the --database-dsn option.""" if "db_dsn" in metafunc.fixturenames: + database_dsns = cast(list, metafunc.config.getoption("--database-dsn")) metafunc.parametrize( "db_dsn", - metafunc.config.getoption("database_dsn"), - indirect=True, # type: ignore + database_dsns, + indirect=True, ) @pytest.fixture -def db_dsn( - request: pytest.FixtureRequest, tmp_path: Path -) -> Generator[str, None, None]: +def db_dsn(request, tmp_path) -> Generator[str, None, None]: """Return a database dsn. - Ensure that the database is cleaned up after each test is done. - - We create a temp path instead of the default in-memory sqlite database as some test + Create a temp path instead of the default in-memory sqlite database as some tests need to generate several engine instances (which will lose the data between them). Concerned tests are CLI tests that need to perform a `tdp init` at the beginning of the test.
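Note: a test that requests the `db_dsn` fixture is parametrized once per DSN collected by the hook above. A minimal sketch, assuming only the DSNs shown in the docstring examples are passed (the test name and assertion are illustrative, not part of this patch):

def test_runs_once_per_dsn(db_dsn: str) -> None:
    # First parametrized run: a temporary file-backed URL such as "sqlite:///.../test.db".
    # With --database-dsn postgresql://user:pass@localhost/testdb, a second run
    # receives that DSN unchanged.
    assert db_dsn.startswith(("sqlite", "postgresql"))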
@@ -58,54 +66,26 @@ def db_dsn( # Assign a temporary database for sqlite if database_dsn == "sqlite": database_dsn = f"sqlite:///{tmp_path / 'test.db'}" - yield database_dsn - - -@pytest.fixture() -def db_engine( - db_dsn: str, request: pytest.FixtureRequest -) -> Generator[Engine, None, None]: - """Create a database engine and optionnally by default initialize the schema.""" - engine = create_engine(db_dsn) - if request.param: - init_database(engine) - yield engine - if request.param: - BaseModel.metadata.drop_all(engine) - engine.dispose() + yield database_dsn -@contextmanager -def create_session(engine: Engine) -> Generator[Session, None, None]: - """Utility function to create a session.""" - Session = sessionmaker(bind=engine) - session = Session() - try: - yield session - finally: - session.close() +def init_dag_directory(path: Path, dag: dict[str, list]) -> None: + """Create and populate the DAG directory with service DAG files.""" + for service_name, operations in dag.items(): + # Save the dag + with (path / (service_name + YML_EXTENSION)).open("w") as fd: + yaml.dump(operations, fd) -def generate_collection_at_path( - path: Path, - dag: dict[str, list], - vars: dict[str, dict[str, dict]], -) -> None: - """Generate a collection at a given path.""" - (dag_dir := path / DAG_DIRECTORY_NAME).mkdir() - (playbooks_dir := path / PLAYBOOKS_DIRECTORY_NAME).mkdir() - (tdp_vars_defaults_dir := path / DEFAULT_VARS_DIRECTORY_NAME).mkdir() +def init_playbooks_directory(path: Path, dag: dict[str, list]) -> None: + """Create and populate the playbooks directory with operation playbooks.""" # Minimal playbook which will be used for operations minimal_playbook = [ {"hosts": "localhost"}, ] for service_name, operations in dag.items(): - # Save the dag - with (dag_dir / (service_name + YML_EXTENSION)).open("w") as fd: - yaml.dump(operations, fd) - # Save playbooks for operation in operations: # Do not generate playbooks for noop operations @@ -114,40 +94,56 @@ def generate_collection_at_path( # Generate and save stop and restart playbooks for each start operation if operation["name"].endswith("_start"): with ( - playbooks_dir + path / (operation["name"].rstrip("_start") + "_restart" + YML_EXTENSION) ).open("w") as fd: yaml.dump(minimal_playbook, fd) with ( - playbooks_dir + path / (operation["name"].rstrip("_start") + "_stop" + YML_EXTENSION) ).open("w") as fd: yaml.dump(minimal_playbook, fd) # Save the playbook - with (playbooks_dir / (operation["name"] + YML_EXTENSION)).open("w") as fd: + with (path / (operation["name"] + YML_EXTENSION)).open("w") as fd: yaml.dump(minimal_playbook, fd) # Save the sleep playbook - with (playbooks_dir / (OPERATION_SLEEP_NAME + YML_EXTENSION)).open("w") as fd: + with (path / (OPERATION_SLEEP_NAME + YML_EXTENSION)).open("w") as fd: yaml.dump(minimal_playbook, fd) - # Save the vars + +def init_default_vars_directory(path: Path, vars: dict[str, dict[str, dict]]) -> None: + """Create and populate the default vars directory with service variables.""" for service_name, file_vars in vars.items(): - service_dir = tdp_vars_defaults_dir / service_name + service_dir = path / service_name service_dir.mkdir() for filename, vars in file_vars.items(): + if not filename.endswith(YML_EXTENSION): + filename += YML_EXTENSION with (service_dir / filename).open("w") as fd: - yaml.dump(vars, fd) + yaml.dump(vars, fd, sort_keys=False) -def assert_equal_values_in_model(model1: Any, model2: Any) -> bool: - """SQLAlchemy asserts that two identical objects of type DeclarativeBase parent of the 
BaseModel class, - which is used in TDP as pattern for the table models, are identical if they are compared in the same session, - but different if compared in two different sessions. +def generate_collection_at_path( + path: Path, + dag: Optional[dict[str, list]] = None, + vars: Optional[dict[str, dict[str, dict]]] = None, +) -> Path: + """Generate a collection at a given path.""" + path.mkdir(parents=True, exist_ok=True) - This function therefore transforms the tables into dictionaries and by parsing the coulumns compares their values. - """ - if isinstance(model1, BaseModel) and isinstance(model2, BaseModel): - return model1.to_dict() == model2.to_dict() - else: - return False + # Dag + (dag_dir := path / DAG_DIRECTORY_NAME).mkdir(parents=True) + if dag: + init_dag_directory(dag_dir, dag) + + # Playbooks + (playbooks_dir := path / PLAYBOOKS_DIRECTORY_NAME).mkdir() + if dag: + init_playbooks_directory(playbooks_dir, dag) + + # Default vars + (tdp_vars_defaults_dir := path / DEFAULT_VARS_DIRECTORY_NAME).mkdir(parents=True) + if vars: + init_default_vars_directory(tdp_vars_defaults_dir, vars) + return path diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py index e7da4af9..8dd2553e 100644 --- a/tests/e2e/conftest.py +++ b/tests/e2e/conftest.py @@ -3,63 +3,105 @@ from collections.abc import Generator from pathlib import Path -from typing import NamedTuple +from typing import Callable import pytest -from click.testing import CliRunner -from sqlalchemy import create_engine +from click.testing import CliRunner, Result -from tdp.cli.commands.init import init -from tdp.core.models import BaseModel -from tests.conftest import generate_collection_at_path +from tdp.cli.__main__ import cli +from tdp.core.constants import ( + DAG_DIRECTORY_NAME, + DEFAULT_VARS_DIRECTORY_NAME, + PLAYBOOKS_DIRECTORY_NAME, +) +from tests.conftest import ( + generate_collection_at_path, + init_dag_directory, + init_default_vars_directory, + init_playbooks_directory, +) -class TDPInitArgs(NamedTuple): - collection_path: Path - db_dsn: str - vars: Path +@pytest.fixture +def runner() -> Generator[CliRunner, None, None]: + """Fixture to provide a Click test runner.""" + runner = CliRunner(env={"TDP_MOCK_DEPLOY": "True"}) + # Run tests in an isolated filesystem to avoid side effects (e.g. 
local .env file) + with runner.isolated_filesystem(): + yield runner @pytest.fixture -def tdp_init( - collection_path: Path, db_dsn: str, vars: Path -) -> Generator[TDPInitArgs, None, None]: - base_args = [ - "--collection-path", - str(collection_path), - "--database-dsn", - db_dsn, - "--vars", - str(vars), - ] - runner = CliRunner() - runner.invoke(init, base_args) - yield TDPInitArgs(collection_path, db_dsn, vars) - engine = create_engine(db_dsn) - BaseModel.metadata.drop_all(engine) - engine.dispose() +def tdp(runner): + """Fixture to provide a function that invokes the TDP CLI with given arguments.""" + + def invoke(args: str) -> Result: + return runner.invoke(cli, args.split()) + + return invoke + + +@pytest.fixture +def vars(tmp_path) -> Path: + """Fixture to create a temporary directory for storing variable files.""" + vars_path = tmp_path / "vars" + vars_path.mkdir(parents=True) + return vars_path + + +class CollectionPath: + """A collection path object that provides methods to create collection directories.""" + + def __init__(self, path: Path): + self.path = path + generate_collection_at_path(self.path) + + def __str__(self) -> str: + """Return the path as a string.""" + return str(self.path) + + def init_dag_directory(self, dag: dict[str, list]) -> None: + """Create and populate the DAG directory with service DAG files.""" + init_dag_directory(self.path / DAG_DIRECTORY_NAME, dag) + + def init_playbooks_directory(self, dag: dict[str, list]) -> None: + """Create and populate the playbooks directory with operation playbooks.""" + init_playbooks_directory(self.path / PLAYBOOKS_DIRECTORY_NAME, dag) + + def init_default_vars_directory(self, vars: dict[str, dict[str, dict]]) -> None: + """Create and populate the default vars directory with service variables.""" + init_default_vars_directory(self.path / DEFAULT_VARS_DIRECTORY_NAME, vars) @pytest.fixture -def collection_path(tmp_path_factory: pytest.TempPathFactory) -> Path: - collection_path = tmp_path_factory.mktemp("collection") - dag_service_operations = { - "service": [ - {"name": "service_install"}, - {"name": "service_config", "depends_on": ["service_install"]}, - {"name": "service_start", "depends_on": ["service_config"]}, - {"name": "service_init", "depends_on": ["service_start"]}, - ], - } - service_vars = { - "service": { - "service.yml": {}, - }, - } - generate_collection_at_path(collection_path, dag_service_operations, service_vars) - return collection_path +def collection_path_factory( + tmp_path, +) -> Generator[Callable[[], CollectionPath], None, None]: + """Fixture that provides a factory function for creating CollectionPath objects. + + The factory function can be called multiple times within a test to create multiple + collections. Each call will create a new temporary directory with a unique name. + + Returns: + A factory function that, when called, returns a new CollectionPath object. 
+ + Example usage: + def test_something(collection_path_factory): + collection1 = collection_path_factory() + collection2 = collection_path_factory() + # Both collections are separate with unique paths + """ + collection_counter = 0 + + def _create_collection_path() -> CollectionPath: + nonlocal collection_counter + collection_counter += 1 + collection_dir = tmp_path / f"collection_{collection_counter}" + return CollectionPath(collection_dir) + + yield _create_collection_path @pytest.fixture -def vars(tmp_path_factory: pytest.TempPathFactory) -> Path: - return tmp_path_factory.mktemp("collection") +def collection_path(collection_path_factory) -> CollectionPath: + return collection_path_factory() diff --git a/tests/e2e/test_tdp.py b/tests/e2e/test_tdp.py new file mode 100644 index 00000000..cd220fe4 --- /dev/null +++ b/tests/e2e/test_tdp.py @@ -0,0 +1,98 @@ +# Copyright 2022 TOSIT.IO +# SPDX-License-Identifier: Apache-2.0 + +"""Integration tests for the main CLI application. + +This module tests CLI-level functionality including: +- Global options (--env, --log-level, --cwd) +- Command routing and parsing +- Main CLI help system +- Cross-command consistency +""" + +import pytest + +from tdp.cli.__main__ import cli + + +def test_main_cli_help(runner): + """Test that the main CLI shows help with available commands.""" + result = runner.invoke(cli, ["--help"]) + assert result.exit_code == 0 + assert "Usage: tdp [OPTIONS] COMMAND [ARGS]..." in result.output + # Check that main commands are listed + assert "init" in result.output + assert "deploy" in result.output + assert "plan" in result.output + assert "status" in result.output + + +def test_main_cli_short_help(runner): + """Test that the main CLI supports -h shortcut.""" + result = runner.invoke(cli, ["-h"]) + assert result.exit_code == 0 + assert "Usage: tdp [OPTIONS] COMMAND [ARGS]..." 
in result.output + + +@pytest.mark.parametrize( + "command", + [ + "init", + "deploy", + "dag", + "default-diff", + "ops", + "browse", + ], +) +def test_command_help_via_cli(runner, command): + """Test that all main commands show help when invoked via main CLI.""" + result = runner.invoke(cli, [command, "--help"]) + assert result.exit_code == 0 + assert "Usage:" in result.output + + +@pytest.mark.parametrize( + "subcommand_group,subcommand", + [ + ("plan", "dag"), + ("plan", "ops"), + ("plan", "reconfigure"), + ("status", "show"), + ("status", "edit"), + ("vars", "edit"), + ("vars", "validate"), + ], +) +def test_subcommand_help_via_cli(runner, subcommand_group, subcommand): + """Test that all subcommands show help when invoked via main CLI.""" + result = runner.invoke(cli, [subcommand_group, subcommand, "--help"]) + assert result.exit_code == 0 + assert "Usage:" in result.output + + +def test_global_log_level_option(runner): + """Test that global --log-level option works.""" + result = runner.invoke(cli, ["--log-level", "DEBUG", "--help"]) + assert result.exit_code == 0 + + +def test_invalid_command(runner): + """Test that invalid commands show appropriate error.""" + result = runner.invoke(cli, ["invalid-command"]) + assert result.exit_code != 0 + assert "No such command" in result.output + + +def test_command_routing_consistency(runner): + """Test that command routing works consistently across all commands.""" + # Test that we can reach nested commands + result = runner.invoke(cli, ["plan", "--help"]) + assert result.exit_code == 0 + assert "dag" in result.output + assert "ops" in result.output + + result = runner.invoke(cli, ["status", "--help"]) + assert result.exit_code == 0 + assert "show" in result.output + assert "edit" in result.output diff --git a/tests/e2e/test_tdp_default_diff.py b/tests/e2e/test_tdp_default_diff.py index 37d7eda7..3beabcf8 100644 --- a/tests/e2e/test_tdp_default_diff.py +++ b/tests/e2e/test_tdp_default_diff.py @@ -1,20 +1,11 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 -from pathlib import Path - -from click.testing import CliRunner - from tdp.cli.commands.default_diff import default_diff -def test_tdp_default_diff(collection_path: Path, vars: Path): - args = [ - "--collection-path", - collection_path, - "--vars", - vars, - ] - runner = CliRunner() - result = runner.invoke(default_diff, args) +def test_tdp_default_diff(runner, collection_path, vars): + result = runner.invoke( + default_diff, f"--collection-path {collection_path} --vars {vars}".split() + ) assert result.exit_code == 0, result.output diff --git a/tests/e2e/test_tdp_deploy.py b/tests/e2e/test_tdp_deploy.py index 3183b088..e7051b3f 100644 --- a/tests/e2e/test_tdp_deploy.py +++ b/tests/e2e/test_tdp_deploy.py @@ -1,37 +1,36 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 -from click.testing import CliRunner - from tdp.cli.commands.deploy import deploy -from tdp.cli.commands.plan.dag import dag -from tests.e2e.conftest import TDPInitArgs -def test_tdp_deploy_mock( - tdp_init: TDPInitArgs, -): - runner = CliRunner() - result = runner.invoke( - dag, - [ - "--collection-path", - str(tdp_init.collection_path), - "--database-dsn", - tdp_init.db_dsn, - ], +def test_tdp_deploy_mock(runner, tdp, collection_path, db_dsn, vars): + collection_path.init_dag_directory( + { + "service": [ + {"name": "service_install"}, + ], + } + ) + collection_path.init_default_vars_directory( + { + "service": { + "service.yml": {}, + }, + } + ) + + result = tdp( + f"init 
--collection-path {collection_path} --database-dsn {db_dsn} --vars {vars}" + ) + assert result.exit_code == 0, result.output + result = tdp( + f"plan dag --collection-path {collection_path} --database-dsn {db_dsn}" ) assert result.exit_code == 0, result.output + result = runner.invoke( deploy, - [ - "--collection-path", - str(tdp_init.collection_path), - "--database-dsn", - tdp_init.db_dsn, - "--vars", - str(tdp_init.vars), - "--mock-deploy", - ], + f"--collection-path {collection_path} --database-dsn {db_dsn} --vars {vars}".split(), ) assert result.exit_code == 0, result.output diff --git a/tests/e2e/test_tdp_init.py b/tests/e2e/test_tdp_init.py index 1ee435e7..6620ac1a 100644 --- a/tests/e2e/test_tdp_init.py +++ b/tests/e2e/test_tdp_init.py @@ -1,25 +1,108 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 -import os -from pathlib import Path +from tdp.cli.commands.init import init +from tdp.core.repository.git_repository import GitRepository -from click.testing import CliRunner -from tdp.cli.commands.init import init +def test_tdp_init_missing_collection_path(runner, db_dsn, vars): + """Test that the init command fails when collection path is not provided.""" + result = runner.invoke(init, f"--database-dsn {db_dsn} --vars {vars}".split()) + assert result.exit_code != 0 + assert "Error: Missing option '--collection-path'." in result.output + + +def test_tdp_init_missing_database_dsn(runner, collection_path, vars): + """Test that the init command fails when database DSN is not provided.""" + result = runner.invoke( + init, f"--collection-path {collection_path} --vars {vars}".split() + ) + assert result.exit_code != 0 + assert "Error: Missing option '--database-dsn'." in result.output + + +def test_tdp_init_missing_vars_dir(runner, collection_path, db_dsn): + """Test that the init command fails when vars directory is not provided.""" + result = runner.invoke( + init, f"--collection-path {collection_path} --database-dsn {db_dsn}".split() + ) + assert result.exit_code != 0 + assert "Error: Missing option '--vars'." 
in result.output + + +def test_tdp_init_variables_single_collection(runner, vars, db_dsn, collection_path): + """Test that the init command runs successfully with valid parameters and a single collection.""" + collection = collection_path + collection.init_default_vars_directory( + { + "s1": { + "s1.yml": {"foo": "value"}, + } + } + ) + + result = runner.invoke( + init, + f"--collection-path {collection} --database-dsn {db_dsn} --vars {vars}".split(), + ) + assert result.exit_code == 0, result.output + assert vars.joinpath("s1", "s1.yml").exists() + assert GitRepository(vars / "s1"), "Git repository should be initialized in vars/s1" + repo = GitRepository(vars / "s1") + assert repo.is_clean(), "Git repository should be clean after initialization" + assert repo.current_version(), "Git repository should have a current version" + assert len(list(repo._repo.iter_commits())) == 1, ( + "Git repository should have one commit after initialization" + ) + assert vars.joinpath("s1", "s1.yml").read_text() == "foo: value\n" + +def test_tdp_init_variables_multiple_collections( + runner, collection_path_factory, vars, db_dsn +): + """Test that the init command runs successfully with valid parameters and multiple collections.""" + collection1 = collection_path_factory() + collection1.init_default_vars_directory( + { + "s1": { + "s1.yml": {"foo": "value", "bar": "other value"}, + } + } + ) + collection2 = collection_path_factory() + collection2.init_default_vars_directory( + { + "s1": { + "s1.yml": {"foo": "new value"}, + }, + "s2": { + "s2_c1.yml": {"baz": "value"}, + }, + } + ) -def test_tdp_init_db_is_created(collection_path: Path, vars: Path, tmp_path: Path): - db_path = tmp_path / "sqlite.db" - args = [ - "--collection-path", - str(collection_path), - "--database-dsn", - "sqlite:///" + str(db_path), - "--vars", - str(vars), - ] - runner = CliRunner() - result = runner.invoke(init, args) - assert os.path.exists(db_path) == True + result = runner.invoke( + init, + f"--collection-path {collection1} --collection-path {collection2} --database-dsn {db_dsn} --vars {vars}".split(), + ) assert result.exit_code == 0, result.output + assert vars.joinpath("s1", "s1.yml").exists() + assert vars.joinpath("s2", "s2_c1.yml").exists() + assert GitRepository(vars / "s1"), "Git repository should be initialized in vars/s1" + assert GitRepository(vars / "s2"), "Git repository should be initialized in vars/s2" + repo_s1 = GitRepository(vars / "s1") + repo_s2 = GitRepository(vars / "s2") + assert repo_s1.is_clean(), "Git repository s1 should be clean after initialization" + assert repo_s2.is_clean(), "Git repository s2 should be clean after initialization" + assert repo_s1.current_version(), "Git repository s1 should have a current version" + assert repo_s2.current_version(), "Git repository s2 should have a current version" + assert len(list(repo_s1._repo.iter_commits())) == 2, ( + "Git repository s1 should have two commits after initialization" + ) + assert len(list(repo_s2._repo.iter_commits())) == 1, ( + "Git repository s2 should have one commit after initialization" + ) + assert ( + vars.joinpath("s1", "s1.yml").read_text() + == "foo: new value\nbar: other value\n" + ) diff --git a/tests/e2e/test_tdp_ops.py b/tests/e2e/test_tdp_ops.py index 59fbbf8e..082d42e2 100644 --- a/tests/e2e/test_tdp_ops.py +++ b/tests/e2e/test_tdp_ops.py @@ -1,15 +1,9 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 -from pathlib import Path - -from click.testing import CliRunner - from tdp.cli.commands.ops import ops -def
test_tdp_nodes(collection_path: Path): - args = ["--collection-path", collection_path] - runner = CliRunner() - result = runner.invoke(ops, args) +def test_tdp_nodes(runner, collection_path): + result = runner.invoke(ops, f"--collection-path {collection_path}".split()) assert result.exit_code == 0, result.output diff --git a/tests/e2e/test_tdp_plan_dag.py b/tests/e2e/test_tdp_plan_dag.py index 20979afc..620c49af 100644 --- a/tests/e2e/test_tdp_plan_dag.py +++ b/tests/e2e/test_tdp_plan_dag.py @@ -1,24 +1,23 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 - -from click.testing import CliRunner - from tdp.cli.commands.plan.dag import dag -from tests.e2e.conftest import TDPInitArgs -def test_tdp_plan_dag( - tdp_init: TDPInitArgs, -): - runner = CliRunner() +def test_tdp_plan_dag(tdp, runner, collection_path, db_dsn, vars): + collection_path.init_dag_directory( + { + "service": [ + {"name": "service_install"}, + ] + } + ) + result = tdp( + f"init --collection-path {collection_path} --vars {vars} --database-dsn {db_dsn}" + ) + assert result.exit_code == 0, result.output + result = runner.invoke( - dag, - [ - "--collection-path", - str(tdp_init.collection_path), - "--database-dsn", - tdp_init.db_dsn, - ], + dag, f"--collection-path {collection_path} --database-dsn {db_dsn}".split() ) assert result.exit_code == 0, result.output diff --git a/tests/e2e/test_tdp_plan_ops.py b/tests/e2e/test_tdp_plan_ops.py index 4bfaa9f9..8d427df7 100644 --- a/tests/e2e/test_tdp_plan_ops.py +++ b/tests/e2e/test_tdp_plan_ops.py @@ -1,25 +1,24 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 - -from click.testing import CliRunner - from tdp.cli.commands.plan.ops import ops -from tests.e2e.conftest import TDPInitArgs -def test_tdp_plan_run( - tdp_init: TDPInitArgs, -): - runner = CliRunner() +def test_tdp_plan_run(tdp, vars, runner, collection_path, db_dsn): + collection_path.init_dag_directory( + { + "service": [ + {"name": "service_install"}, + ] + } + ) + result = tdp( + f"init --collection-path {collection_path} --vars {vars} --database-dsn {db_dsn}" + ) + assert result.exit_code == 0, result.output + result = runner.invoke( ops, - [ - "--collection-path", - str(tdp_init.collection_path), - "--database-dsn", - tdp_init.db_dsn, - "service_install", - ], + f"--collection-path {collection_path} --database-dsn {db_dsn} service_install".split(), ) assert result.exit_code == 0, result.output diff --git a/tests/e2e/test_tdp_plan_reconfigure.py b/tests/e2e/test_tdp_plan_reconfigure.py index 4a429da0..cecc8ca5 100644 --- a/tests/e2e/test_tdp_plan_reconfigure.py +++ b/tests/e2e/test_tdp_plan_reconfigure.py @@ -1,26 +1,12 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 - -from click.testing import CliRunner - from tdp.cli.commands.plan.reconfigure import reconfigure -from tests.e2e.conftest import TDPInitArgs -def test_tdp_plan_reconfigure( - tdp_init: TDPInitArgs, -): - runner = CliRunner() +def test_tdp_plan_reconfigure(runner, collection_path, db_dsn): result = runner.invoke( reconfigure, - [ - "--collection-path", - str(tdp_init.collection_path), - "--database-dsn", - tdp_init.db_dsn, - ], + f"--collection-path {collection_path} --database-dsn {db_dsn}".split(), ) - assert result.exit_code == 1, ( - result.output - ) # No stale components, hence nothing to reconfigure. 
+ assert result.exit_code == 1, result.output # No stale components, hence nothing to reconfigure. diff --git a/tests/e2e/test_tdp_plan_resume.py b/tests/e2e/test_tdp_plan_resume.py index cbf3075d..0b20ce54 100644 --- a/tests/e2e/test_tdp_plan_resume.py +++ b/tests/e2e/test_tdp_plan_resume.py @@ -1,25 +1,16 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 - -from click.testing import CliRunner - -from tdp.cli.commands.plan.dag import dag from tdp.cli.commands.plan.resume import resume -from tests.e2e.conftest import TDPInitArgs -def test_tdp_plan_resume_nothing_to_resume( - tdp_init: TDPInitArgs, -): - common_args = [ - "--collection-path", - str(tdp_init.collection_path), - "--database-dsn", - tdp_init.db_dsn, - ] - runner = CliRunner() - result = runner.invoke(dag, common_args) +def test_tdp_plan_resume_nothing_to_resume(tdp, runner, collection_path, db_dsn, vars): + result = tdp( + f"init --collection-path {collection_path} --vars {vars} --database-dsn {db_dsn}" + ) assert result.exit_code == 0, result.output - result = runner.invoke(resume, common_args) + + result = runner.invoke( + resume, f"--collection-path {collection_path} --database-dsn {db_dsn}".split() + ) assert result.exit_code == 1, result.output # No deployment to resume. diff --git a/tests/e2e/test_tdp_status_edit.py b/tests/e2e/test_tdp_status_edit.py index f34381db..3e379cac 100644 --- a/tests/e2e/test_tdp_status_edit.py +++ b/tests/e2e/test_tdp_status_edit.py @@ -1,29 +1,23 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 - -from click.testing import CliRunner - from tdp.cli.commands.status.edit import edit -from tests.e2e.conftest import TDPInitArgs -def test_tdp_status_edit( - tdp_init: TDPInitArgs, -): - runner = CliRunner() +def test_tdp_status_edit(tdp, runner, db_dsn, collection_path, vars): + collection_path.init_dag_directory( + { + "service": [ + {"name": "service_install"}, + ], + } + ) + result = tdp( + f"init --collection-path {collection_path} --vars {vars} --database-dsn {db_dsn}" + ) + assert result.exit_code == 0, result.output result = runner.invoke( edit, - [ - "--collection-path", - str(tdp_init.collection_path), - "--database-dsn", - tdp_init.db_dsn, - "--vars", - str(tdp_init.vars), - "service", - "--host", - "localhost", - ], + f"--collection-path {collection_path} --database-dsn {db_dsn} --vars {vars} service --host localhost".split(), ) assert result.exit_code == 0, result.output diff --git a/tests/e2e/test_tdp_status_generate_stales.py b/tests/e2e/test_tdp_status_generate_stales.py index c73685f0..42edd817 100644 --- a/tests/e2e/test_tdp_status_generate_stales.py +++ b/tests/e2e/test_tdp_status_generate_stales.py @@ -1,26 +1,17 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 - -from click.testing import CliRunner - from tdp.cli.commands.status.generate_stales import generate_stales -from tests.e2e.conftest import TDPInitArgs -def test_tdp_status_edit( - tdp_init: TDPInitArgs, -): - runner = CliRunner() +def test_tdp_status_generate_stales(tdp, runner, collection_path, db_dsn, vars): + result = tdp( + f"init --collection-path {collection_path} --vars {vars} --database-dsn {db_dsn}" + ) + assert result.exit_code == 0, result.output + result = runner.invoke( generate_stales, - [ - "--collection-path", - str(tdp_init.collection_path), - "--database-dsn", - tdp_init.db_dsn, - "--vars", - str(tdp_init.vars), - ], + f"--collection-path {collection_path} --database-dsn {db_dsn} --vars {vars}".split(), ) assert result.exit_code == 0, result.output diff --git a/tests/e2e/test_tdp_status_show.py
b/tests/e2e/test_tdp_status_show.py index fe9fd644..fe957e4d 100644 --- a/tests/e2e/test_tdp_status_show.py +++ b/tests/e2e/test_tdp_status_show.py @@ -1,26 +1,17 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 - -from click.testing import CliRunner - from tdp.cli.commands.status.show import show -from tests.e2e.conftest import TDPInitArgs -def test_tdp_status_edit( - tdp_init: TDPInitArgs, -): - runner = CliRunner() +def test_tdp_status_show(tdp, runner, collection_path, db_dsn, vars): + result = tdp( + f"init --collection-path {collection_path} --vars {vars} --database-dsn {db_dsn}" + ) + assert result.exit_code == 0, result.output + result = runner.invoke( show, - [ - "--collection-path", - str(tdp_init.collection_path), - "--database-dsn", - tdp_init.db_dsn, - "--vars", - str(tdp_init.vars), - ], + f"--collection-path {collection_path} --database-dsn {db_dsn} --vars {vars}".split(), ) assert result.exit_code == 0, result.output diff --git a/tests/e2e/test_tdp_vars_validate.py b/tests/e2e/test_tdp_vars_validate.py index c58c4240..ac9d8c35 100644 --- a/tests/e2e/test_tdp_vars_validate.py +++ b/tests/e2e/test_tdp_vars_validate.py @@ -1,15 +1,11 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 -from pathlib import Path - -from click.testing import CliRunner - from tdp.cli.commands.vars.validate import validate -def test_tdp_validate(collection_path: Path, vars: Path): - args = ["--collection-path", collection_path, "--vars", vars] - runner = CliRunner() - result = runner.invoke(validate, args) +def test_tdp_validate(runner, collection_path, vars): + result = runner.invoke( + validate, f"--collection-path {collection_path} --vars {vars}".split() + ) assert result.exit_code == 0, result.output diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py new file mode 100644 index 00000000..f45ae45f --- /dev/null +++ b/tests/unit/conftest.py @@ -0,0 +1,42 @@ +# Copyright 2025 TOSIT.IO +# SPDX-License-Identifier: Apache-2.0 + +from collections.abc import Generator + +import pytest +from sqlalchemy import Engine +from sqlalchemy.orm import Session + +from tdp.core.db import get_engine, get_session +from tdp.core.models import init_database +from tdp.core.models.base_model import BaseModel + + +@pytest.fixture() +def db_engine(db_dsn: str) -> Generator[Engine, None, None]: + """Fixture to create a database engine. + + This fixture initializes the database schema and returns an engine that can be used + in tests. It also ensures that the database is cleared after the test completes. + """ + engine = get_engine(db_dsn) + init_database(engine) + try: + yield engine + finally: + BaseModel.metadata.drop_all(engine) + engine.dispose() + + +@pytest.fixture() +def db_session(db_engine: Engine) -> Generator[Session, None, None]: + """Fixture to create a database session. + + This fixture returns a session that can be used in tests. It also + ensures that the session is closed after the test completes.
+ """ + session = get_session(db_engine) + try: + yield session + finally: + session.close() diff --git a/tests/unit/core/models/test_deployment_log.py b/tests/unit/core/models/test_deployment_log.py index 79a1ba81..5a2f69f7 100644 --- a/tests/unit/core/models/test_deployment_log.py +++ b/tests/unit/core/models/test_deployment_log.py @@ -7,7 +7,6 @@ import lorem import pytest -from sqlalchemy import Engine from tdp.core.collections import ( Collections, @@ -20,7 +19,7 @@ ) from tdp.core.models.enums import DeploymentStateEnum, OperationStateEnum from tdp.core.models.operation_model import OperationModel -from tests.conftest import create_session, generate_collection_at_path +from tests.conftest import generate_collection_at_path if TYPE_CHECKING: from tdp.core.dag import Dag @@ -338,29 +337,27 @@ def test_deployment_plan_resume_from_operations_extra_vars( assert operation_rec.extra_vars == extra_vars -@pytest.mark.parametrize("db_engine", [True], indirect=True) class Test_multiple_db: - def test_operation_log_length(self, db_engine: Engine): + def test_operation_log_length(self, db_session): # Param for lorem.word() is the number of non-repeated random words. lorem_generator = lorem.word(1000) # Generate a string with 10 000 000 characters lorem_content = " ".join(lorem_gen_until_len(lorem_generator, 10000000)) - with create_session(db_engine) as session: - # All databases except SQLite have a constraint on foreignkey deployment_id. - deployment_content = DeploymentModel(id=1) - session.add(deployment_content) - # Add the lorem ipsum text in bytes to the logs column in the operation table. - operation_content = OperationModel( - deployment_id=1, - operation_order=1, - operation="logs_length_test", - state=OperationStateEnum.RUNNING, - logs=lorem_content.encode("utf-8"), - ) - session.add(operation_content) - session.commit() - assert session.query(OperationModel.logs).first() is not None + # All databases except SQLite have a constraint on foreignkey deployment_id. + deployment_content = DeploymentModel(id=1) + db_session.add(deployment_content) + # Add the lorem ipsum text in bytes to the logs column in the operation table. 
+ operation_content = OperationModel( + deployment_id=1, + operation_order=1, + operation="logs_length_test", + state=OperationStateEnum.RUNNING, + logs=lorem_content.encode("utf-8"), + ) + db_session.add(operation_content) + db_session.commit() + assert db_session.query(OperationModel.logs).first() is not None @pytest.mark.skip(reason="test to rewrite using cluster_status") diff --git a/tests/unit/core/models/test_models.py b/tests/unit/core/models/test_models.py index 3d29f303..6509284a 100644 --- a/tests/unit/core/models/test_models.py +++ b/tests/unit/core/models/test_models.py @@ -4,18 +4,13 @@ import logging from datetime import datetime, timedelta -import pytest -from sqlalchemy.engine import Engine - from tdp.core.models import DeploymentModel, OperationModel, SCHStatusLogModel -from tests.conftest import create_session logger = logging.getLogger(__name__) # TODO: add some status logs -@pytest.mark.parametrize("db_engine", [True], indirect=True) -def test_create_deployment(db_engine: Engine): +def test_create_deployment(db_session): deployment = DeploymentModel( options={ "sources": ["source1", "source2"], @@ -54,29 +49,28 @@ def test_create_deployment(db_engine: Engine): logger.info(operation_rec) logger.info(component_version_log) - with create_session(db_engine) as session: - session.add(deployment) - session.commit() + db_session.add(deployment) + db_session.commit() - result = session.get(DeploymentModel, deployment.id) + result = db_session.get(DeploymentModel, deployment.id) - logger.info(result) - assert result is not None - assert result.options == { - "sources": ["source1", "source2"], - "targets": ["target1", "target2"], - "filter_expression": ".*", - "filter_type": "glob", - "hosts": ["host1", "host2"], - "restart": False, - } - assert result.state == "Success" - assert result.deployment_type == "Dag" + logger.info(result) + assert result is not None + assert result.options == { + "sources": ["source1", "source2"], + "targets": ["target1", "target2"], + "filter_expression": ".*", + "filter_type": "glob", + "hosts": ["host1", "host2"], + "restart": False, + } + assert result.state == "Success" + assert result.deployment_type == "Dag" - logger.info(result.operations) - assert len(result.operations) == 1 - assert result.operations[0].operation_order == 1 - assert result.operations[0].operation == "start_target1" - assert result.operations[0].host == "host1" - assert result.operations[0].state == "Success" - assert result.operations[0].logs == b"operation log" + logger.info(result.operations) + assert len(result.operations) == 1 + assert result.operations[0].operation_order == 1 + assert result.operations[0].operation == "start_target1" + assert result.operations[0].host == "host1" + assert result.operations[0].state == "Success" + assert result.operations[0].logs == b"operation log" diff --git a/tests/unit/test_dao.py b/tests/unit/test_dao.py index c94299b9..5fb23358 100644 --- a/tests/unit/test_dao.py +++ b/tests/unit/test_dao.py @@ -1,178 +1,42 @@ # Copyright 2022 TOSIT.IO # SPDX-License-Identifier: Apache-2.0 -import logging -import random -import string -from typing import List, Optional +from typing import Any -import pytest from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session from tdp.core.models import ( DeploymentModel, OperationModel, - SCHStatusLogModel, - SCHStatusLogSourceEnum, ) +from tdp.core.models.base_model import BaseModel from tdp.core.models.enums import DeploymentStateEnum, OperationStateEnum from tdp.dao import Dao -from 
tests.conftest import assert_equal_values_in_model, create_session -logger = logging.getLogger(__name__) +def assert_equal_values_in_model(model1: Any, model2: Any) -> bool: + """SQLAlchemy considers two identical objects of a DeclarativeBase subclass (the parent of the BaseModel class, + which is used in TDP as a pattern for the table models) equal if they are compared in the same session, + but different if compared in two different sessions. -def _generate_version() -> str: - """Generate a random version string.""" - return "".join( - random.choice(string.ascii_lowercase + string.digits) for _ in range(6) - ) + This function therefore transforms the models into dictionaries and compares their column values. + """ + if isinstance(model1, BaseModel) and isinstance(model2, BaseModel): + return model1.to_dict() == model2.to_dict() + else: + return False -def _set_seed(seed: Optional[str] = None) -> None: - """Set the seed for random number generation and log the seed.""" - _seed = seed or _generate_version() # Generate a random seed - random.seed(_seed) - logger.info(f"Random seed set to: {_seed}") - -def _mock_sch_status_log( - service: str, - component: Optional[str], - host: Optional[str], - n: int = 50, - seed: Optional[str] = None, -) -> List["SCHStatusLogModel"]: - """Generate n mock SCHStatusLog entries.""" - _set_seed(seed) - logs = [] - for _ in range(n): - logs.append( - SCHStatusLogModel( - service=service, - component=component, - host=host, - source=SCHStatusLogSourceEnum.STALE, - running_version=( - _generate_version() if random.choice([True, False]) else None - ), - configured_version=( - _generate_version() if random.choice([True, False]) else None - ), - to_config=random.choice([True, False, None]), - to_restart=random.choice([True, False, None]), - ) +def test_get_deployment(db_session: Session, db_engine: Engine): + db_session.add( + DeploymentModel( + id=1, + state=DeploymentStateEnum.RUNNING, ) - return logs - - -def _last_values( - logs: List["SCHStatusLogModel"], -): - """Return an SCHStatusLog holding the last non None value for each column from a list of logs.""" - return ( - logs[-1].service, - logs[-1].component, - logs[-1].host, - next( - ( - log.running_version - for log in reversed(logs) - if log.running_version is not None - ), - None, - ), - next( - ( - log.configured_version - for log in reversed(logs) - if log.configured_version is not None - ), - None, - ), - next( - (log.to_config for log in reversed(logs) if log.to_config is not None), None - ), - next( - (log.to_restart for log in reversed(logs) if log.to_restart is not None), - None, - ), ) + db_session.commit() - -@pytest.mark.skip(reason="db_session fixture needs to be reworked.") -@pytest.mark.parametrize("db_engine", [True], indirect=True) -def test_single_service_component_status(db_engine: Engine): - """Test the get_sch_status query with a single sch.""" - logs = _mock_sch_status_log("smock", "cmock", "hmock", 5) - last_values = _last_values(logs) - - # Use this instead of db_session.add_all() to ensure different timestamps - with create_session(db_engine) as session: - for log in logs: - session.add(log) - # Commit at each step to ensure different timestamps - session.commit() - - with Dao(db_engine) as dao: - assert dao.get_cluster_status() == [last_values] - - -@pytest.mark.skip(reason="db_session fixture needs to be reworked.") -@pytest.mark.parametrize("db_engine", [True], indirect=True) -def test_multiple_service_component_status(db_engine: Engine): - """Test the
get_sch_status query with multiple schs.""" - classic_component_logs = _mock_sch_status_log("smock", "cmock", "hmock") - service_noop_logs = _mock_sch_status_log("smock", None, None) - component_noop_logs = _mock_sch_status_log("smock", "cmock", None) - service_logs = _mock_sch_status_log("smock", None, "hmock") - - log_lists = [ - classic_component_logs, - service_noop_logs, - component_noop_logs, - service_logs, - ] - - last_values = set([_last_values(log_list) for log_list in log_lists]) - - # Create iterators for each log list to step through them. - iterators = [iter(log_list) for log_list in log_lists] - - # Fetch the first log from each list. 'None' if the list is empty. - next_logs = [next(it, None) for it in iterators] - - # Continue until all logs have been appended. - while any(log is not None for log in next_logs): - # Get indices of the lists that still have logs left. - available_indices = [i for i, log in enumerate(next_logs) if log is not None] - - # Randomly select one of the available log lists. - chosen_index = random.choice(available_indices) - - # Append the next log from the chosen list to the database. - with create_session(db_engine) as session: - session.add(next_logs[chosen_index]) - # Commit at each step to ensure different timestamps - session.commit() - - # Update the next log for the chosen list. 'None' if no more logs are left. - next_logs[chosen_index] = next(iterators[chosen_index], None) - - with Dao(db_engine) as dao: - assert set(dao.get_cluster_status()) == last_values - - -@pytest.mark.parametrize("db_engine", [True], indirect=True) -def test_get_deployment(db_engine): - with create_session(db_engine) as session: - session.add( - DeploymentModel( - id=1, - state=DeploymentStateEnum.RUNNING, - ) - ) - session.commit() with Dao(db_engine) as dao: assert assert_equal_values_in_model( dao.get_deployment(1), @@ -183,16 +47,15 @@ def test_get_deployment(db_engine): ) -@pytest.mark.parametrize("db_engine", [True], indirect=True) -def test_get_planned_deployment(db_engine): - with create_session(db_engine) as session: - session.add( - DeploymentModel( - id=1, - state=DeploymentStateEnum.PLANNED, - ) +def test_get_planned_deployment(db_session: Session, db_engine: Engine): + db_session.add( + DeploymentModel( + id=1, + state=DeploymentStateEnum.PLANNED, ) - session.commit() + ) + db_session.commit() + with Dao(db_engine) as dao: assert assert_equal_values_in_model( dao.get_planned_deployment(), @@ -203,22 +66,21 @@ def test_get_planned_deployment(db_engine): ) -@pytest.mark.parametrize("db_engine", [True], indirect=True) -def test_get_last_deployment(db_engine): - with create_session(db_engine) as session: - session.add( - DeploymentModel( - id=2, - state=DeploymentStateEnum.FAILURE, - ) +def test_get_last_deployment(db_session: Session, db_engine: Engine): + db_session.add( + DeploymentModel( + id=2, + state=DeploymentStateEnum.FAILURE, ) - session.add( - DeploymentModel( - id=3, - state=DeploymentStateEnum.SUCCESS, - ) + ) + db_session.add( + DeploymentModel( + id=3, + state=DeploymentStateEnum.SUCCESS, ) - session.commit() + ) + db_session.commit() + with Dao(db_engine) as dao: assert assert_equal_values_in_model( dao.get_last_deployment(), @@ -229,22 +91,21 @@ def test_get_last_deployment(db_engine): ) -@pytest.mark.parametrize("db_engine", [True], indirect=True) -def test_get_deployments(db_engine): - with create_session(db_engine) as session: - session.add( - DeploymentModel( - id=1, - state=DeploymentStateEnum.SUCCESS, - ) +def 
test_get_deployments(db_session: Session, db_engine: Engine): + db_session.add( + DeploymentModel( + id=1, + state=DeploymentStateEnum.SUCCESS, ) - session.add( - DeploymentModel( - id=2, - state=DeploymentStateEnum.PLANNED, - ) + ) + db_session.add( + DeploymentModel( + id=2, + state=DeploymentStateEnum.PLANNED, ) - session.commit() + ) + db_session.commit() + with Dao(db_engine) as dao: assert assert_equal_values_in_model( list(dao.get_last_deployments())[0], @@ -256,19 +117,18 @@ def test_get_deployments(db_engine): ) -@pytest.mark.parametrize("db_engine", [True], indirect=True) -def test_operation(db_engine): - with create_session(db_engine) as session: - session.add(DeploymentModel(id=1, state=DeploymentStateEnum.SUCCESS)) - session.add( - OperationModel( - deployment_id=1, - operation_order=1, - operation="test_operation", - state=OperationStateEnum.SUCCESS, - ) +def test_operation(db_session: Session, db_engine: Engine): + db_session.add(DeploymentModel(id=1, state=DeploymentStateEnum.SUCCESS)) + db_session.add( + OperationModel( + deployment_id=1, + operation_order=1, + operation="test_operation", + state=OperationStateEnum.SUCCESS, ) - session.commit() + ) + db_session.commit() + with Dao(db_engine) as dao: assert assert_equal_values_in_model( dao.get_operations_by_name(
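Taken together, the refactored e2e fixtures replace the old TDPInitArgs tuple: a test populates a generated collection, runs `tdp init` through the main CLI entry point, and then chains further commands. A minimal sketch assuming the fixtures above (the service name, file contents, and the chained plan/deploy calls are illustrative, mirroring test_tdp_deploy.py):

def test_init_plan_deploy(tdp, collection_path, db_dsn, vars):
    # Populate the generated collection with a one-operation DAG and empty default vars.
    collection_path.init_dag_directory({"service": [{"name": "service_install"}]})
    collection_path.init_default_vars_directory({"service": {"service.yml": {}}})
    # Each step goes through the main CLI; TDP_MOCK_DEPLOY is set by the runner fixture.
    assert tdp(f"init --collection-path {collection_path} --database-dsn {db_dsn} --vars {vars}").exit_code == 0
    assert tdp(f"plan dag --collection-path {collection_path} --database-dsn {db_dsn}").exit_code == 0
    assert tdp(f"deploy --collection-path {collection_path} --database-dsn {db_dsn} --vars {vars}").exit_code == 0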