From a5b18a2838614dadd8d71b33b8af7e0755a549ed Mon Sep 17 00:00:00 2001 From: devanshjain Date: Tue, 21 Oct 2025 20:44:01 +0000 Subject: [PATCH 01/17] Add unit tests for ConfigurationCheckModule --- .../module_utils/filesystem_collector_test.py | 815 ++++++++++++++ .../configuration_check_module_test.py | 992 ++++++++++++++++++ 2 files changed, 1807 insertions(+) create mode 100644 tests/module_utils/filesystem_collector_test.py create mode 100644 tests/modules/configuration_check_module_test.py diff --git a/tests/module_utils/filesystem_collector_test.py b/tests/module_utils/filesystem_collector_test.py new file mode 100644 index 00000000..5bcb74c9 --- /dev/null +++ b/tests/module_utils/filesystem_collector_test.py @@ -0,0 +1,815 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +Unit tests for the FileSystemCollector module. + +This test suite provides comprehensive coverage for filesystem data collection, +LVM volume parsing, Azure disk correlation, and NFS storage (ANF/AFS) integration. +Tests use pytest with monkeypatch for mocking, avoiding unittest entirely. +""" + +import json +import logging +from typing import Any, Dict, List +from unittest.mock import Mock + +import pytest + +from src.module_utils.filesystem_collector import FileSystemCollector + + +class MockParent: + """ + Mock SapAutomationQA parent for testing FileSystemCollector. + + Provides logging, error handling, and command execution interfaces + that FileSystemCollector depends on. 
+ """ + + def __init__(self): + self.logs = [] + self.errors = [] + + def log(self, level: int, message: str) -> None: + """Mock log method to capture log messages""" + self.logs.append({"level": level, "message": message}) + + def handle_error(self, error: Exception) -> None: + """Mock handle_error method to capture errors""" + self.errors.append(error) + + def execute_command_subprocess(self, command: str, shell_command: bool = True) -> str: + """Mock execute_command_subprocess method""" + return "mock_output" + + +class MockCheck: + """ + Mock Check object for testing collectors. + + Simulates configuration check objects with collector arguments. + """ + + def __init__(self, collector_args: Dict[str, Any] | None = None): + self.collector_args = collector_args or {} + + +@pytest.fixture +def mock_parent(): + """Fixture to provide a fresh MockParent instance for each test""" + return MockParent() + + +@pytest.fixture +def collector(mock_parent): + """Fixture to provide a FileSystemCollector instance""" + return FileSystemCollector(mock_parent) + + +class TestFileSystemCollectorInit: + """Test suite for FileSystemCollector initialization""" + + def test_initialization(self, mock_parent): + """Test FileSystemCollector initializes properly with parent""" + collector = FileSystemCollector(mock_parent) + assert collector.parent == mock_parent + + +class TestParseFilesystemData: + """Test suite for _parse_filesystem_data method""" + + def test_parse_filesystem_basic(self, collector): + """Test basic filesystem parsing with findmnt and df outputs""" + findmnt_output = ( + "/hana/data /dev/mapper/datavg-datalv xfs rw,relatime,attr2\n" + "/hana/log /dev/sdc ext4 rw,relatime\n" + ) + df_output = ( + "Filesystem 1K-blocks Used Available Use% Mounted\n" + "/dev/mapper/datavg-datalv 524288000 104857600 419430400 20% /hana/data\n" + "/dev/sdc 104857600 10485760 94371840 10% /hana/log\n" + ) + lvm_volume = { + "datalv": { + "dm_path": "/dev/mapper/datavg-datalv", + "vg_name": 
"datavg", + "stripe_size": "256k", + } + } + result = collector._parse_filesystem_data( + findmnt_output, df_output, lvm_volume, {}, [], [], [] + ) + assert len(result) == 2 + assert result[0]["target"] == "/hana/data" + assert result[0]["vg"] == "datavg" + assert result[0]["stripe_size"] == "256k" + assert result[1]["target"] == "/hana/log" + assert result[1]["vg"] == "" + + def test_parse_filesystem_with_nfs_anf(self, collector): + """Test NFS filesystem parsing with ANF storage correlation""" + findmnt_output = "/hana/shared 10.0.0.5:/volume1 nfs4 rw,relatime\n" + df_output = ( + "Filesystem 1K-blocks Used Available Use% Mounted\n" + "10.0.0.5:/volume1 1048576000 104857600 943718400 10% /hana/shared\n" + ) + anf_storage_data = [ + { + "ip": "10.0.0.5", + "throughputMibps": 1024, + "serviceLevel": "Premium", + } + ] + result = collector._parse_filesystem_data( + findmnt_output, df_output, {}, {}, [], anf_storage_data, [] + ) + assert len(result) == 1 + assert result[0]["target"] == "/hana/shared" + assert result[0]["max_mbps"] == 1024 + assert result[0]["max_iops"] == "-" + assert result[0]["nfs_type"] == "ANF" + + def test_parse_filesystem_with_nfs_afs(self, collector): + """Test NFS filesystem parsing with AFS storage correlation""" + findmnt_output = "/hana/backup 10.0.1.10:/share nfs rw,relatime\n" + df_output = ( + "Filesystem 1K-blocks Used Available Use% Mounted\n" + "10.0.1.10:/share 2097152000 209715200 1887436800 10% /hana/backup\n" + ) + afs_storage_data = [ + { + "NFSAddress": "10.0.1.10:/share", + "ThroughputMibps": 512, + "IOPS": 50000, + } + ] + result = collector._parse_filesystem_data( + findmnt_output, df_output, {}, {}, [], [], afs_storage_data + ) + assert len(result) == 1 + assert result[0]["max_mbps"] == 512 + assert result[0]["max_iops"] == 50000 + assert result[0]["nfs_type"] == "AFS" + + def test_parse_filesystem_with_azure_disk(self, collector): + """Test filesystem parsing with direct Azure disk correlation""" + findmnt_output = 
"/datadisk /dev/sdc xfs rw,relatime\n" + df_output = ( + "Filesystem 1K-blocks Used Available Use% Mounted\n" + "/dev/sdc 524288000 52428800 471859200 10% /datadisk\n" + ) + azure_disk_data = [ + { + "name": "sdc", + "mbps": 750, + "iops": 20000, + } + ] + result = collector._parse_filesystem_data( + findmnt_output, df_output, {}, {}, azure_disk_data, [], [] + ) + assert len(result) == 1 + assert result[0]["max_mbps"] == 750 + assert result[0]["max_iops"] == 20000 + + def test_parse_filesystem_with_lvm_mapping(self, collector, mock_parent): + """Test filesystem parsing with LVM volume group to disk mapping""" + findmnt_output = "/hana/data /dev/mapper/datavg-datalv xfs rw,relatime\n" + df_output = ( + "Filesystem 1K-blocks Used Available Use% Mounted\n" + "/dev/mapper/datavg-datalv 1048576000 104857600 943718400 10% /hana/data\n" + ) + lvm_volume = { + "datalv": { + "dm_path": "/dev/mapper/datavg-datalv", + "vg_name": "datavg", + "stripe_size": "256k", + } + } + vg_to_disk_names = {"datavg": ["disk1", "disk2"]} + result = collector._parse_filesystem_data( + findmnt_output, + df_output, + lvm_volume, + {}, + [], + [], + [], + vg_to_disk_names, + ) + assert len(result) == 1 + assert result[0]["vg"] == "datavg" + assert result[0]["azure_disk_names"] == ["disk1", "disk2"] + assert any("Mapped VG" in log["message"] for log in mock_parent.logs) + + +class TestMapVgToDiskNames: + """Test suite for _map_vg_to_disk_names method""" + + def test_map_vg_to_disk_names_success(self, collector, mock_parent): + """Test successful VG to disk name mapping with complete data""" + lvm_fullreport = { + "report": [ + { + "pv": [{"pv_name": "/dev/sdc"}, {"pv_name": "/dev/sdd"}], + "vg": [{"vg_name": "datavg"}], + } + ] + } + imds_metadata = [ + {"lun": "0", "name": "disk1"}, + {"lun": "1", "name": "disk2"}, + ] + device_lun_map = {"sdc": "0", "sdd": "1"} + result = collector._map_vg_to_disk_names(lvm_fullreport, imds_metadata, device_lun_map) + assert "datavg" in result + assert 
sorted(result["datavg"]) == ["disk1", "disk2"] + assert any("Found 1 LVM reports" in log["message"] for log in mock_parent.logs) + + def test_map_vg_to_disk_names_error_cases(self, collector, mock_parent): + """Test VG mapping handles various error conditions: missing LUN, missing IMDS, no VG names, exceptions""" + mock_parent.logs.clear() + lvm_fullreport = { + "report": [{"pv": [{"pv_name": "/dev/sdc"}], "vg": [{"vg_name": "datavg"}]}] + } + result = collector._map_vg_to_disk_names( + lvm_fullreport, [{"lun": "0", "name": "disk1"}], {} + ) + assert result == {} or result.get("datavg") == [] + assert any("No LUN mapping found" in log["message"] for log in mock_parent.logs) + + mock_parent.logs.clear() + result = collector._map_vg_to_disk_names(lvm_fullreport, [], {"sdc": "0"}) + assert result.get("datavg", []) == [] + assert any("No IMDS entry for LUN" in log["message"] for log in mock_parent.logs) + + mock_parent.logs.clear() + lvm_fullreport_no_vg = {"report": [{"pv": [{"pv_name": "/dev/sdc"}], "vg": []}]} + result = collector._map_vg_to_disk_names(lvm_fullreport_no_vg, {}, {}) + assert result == {} + assert any("no VG names found" in log["message"] for log in mock_parent.logs) + + mock_parent.logs.clear() + result = collector._map_vg_to_disk_names(None, [], {}) + assert result == {} + assert any("Failed to map VG" in log["message"] for log in mock_parent.logs) + + +class TestCollectLvmVolumes: + """Test suite for collect_lvm_volumes method""" + + def test_collect_lvm_volumes_success_and_edge_cases(self, collector): + """Test LVM volume collection: success cases, rootvg filtering, and invalid pv_count handling""" + lvm_fullreport = { + "report": [ + { + "vg": [ + {"vg_name": "datavg", "pv_count": "2", "lv_count": "1", "vg_size": "1024g"} + ], + "lv": [ + { + "lv_name": "datalv", + "lv_full_name": "datavg/datalv", + "lv_path": "/dev/datavg/datalv", + "lv_dm_path": "/dev/mapper/datavg-datalv", + "lv_layout": "linear", + "lv_size": "512g", + "lv_uuid": "uuid123", + 
} + ], + "seg": [{"lv_uuid": "uuid123", "stripes": "2", "stripe_size": "256k"}], + } + ] + } + lvm_volumes, lvm_groups = collector.collect_lvm_volumes(lvm_fullreport) + assert "datalv" in lvm_volumes + assert lvm_volumes["datalv"]["vg_name"] == "datavg" + assert lvm_volumes["datalv"]["stripe_size"] == "256k" + assert "datavg" in lvm_groups + assert lvm_groups["datavg"]["disks"] == 2 + + lvm_fullreport_vgname = { + "report": [ + { + "vg": [ + {"vg_name": "logvg", "pv_count": "1", "lv_count": "1", "vg_size": "256g"} + ], + "lv": [{"lv_name": "loglv", "vg_name": "logvg", "lv_uuid": "uuid456"}], + "seg": [], + } + ] + } + lvm_volumes, lvm_groups = collector.collect_lvm_volumes(lvm_fullreport_vgname) + assert "loglv" in lvm_volumes + assert lvm_volumes["loglv"]["vg_name"] == "logvg" + lvm_fullreport_rootvg = { + "report": [ + { + "vg": [{"vg_name": "rootvg", "pv_count": "1"}], + "lv": [ + {"lv_name": "rootlv", "lv_full_name": "rootvg/rootlv", "lv_uuid": "uuid789"} + ], + "seg": [], + } + ] + } + lvm_volumes, _ = collector.collect_lvm_volumes(lvm_fullreport_rootvg) + assert "rootlv" not in lvm_volumes + lvm_fullreport_invalid = { + "report": [ + { + "vg": [{"vg_name": "testvg", "pv_count": "invalid", "lv_count": "1"}], + "lv": [], + "seg": [], + } + ] + } + _, lvm_groups = collector.collect_lvm_volumes(lvm_fullreport_invalid) + assert lvm_groups["testvg"]["disks"] == 0 + + def test_collect_lvm_volumes_exception(self, collector): + """Test LVM collection handles exceptions and returns error message""" + result = collector.collect_lvm_volumes(None) + assert isinstance(result, str) + assert "ERROR: LVM volume collection failed" in result + + +class TestParseMetadata: + """Test suite for _parse_metadata method""" + + def test_parse_metadata_various_formats(self, collector, mock_parent): + """Test parsing metadata in various formats: lists, dicts, JSON strings, newline-delimited JSON""" + raw_data = [{"name": "disk1", "size": "512"}, {"name": "disk2", "size": "1024"}] + 
result = collector._parse_metadata(raw_data, "test") + assert len(result) == 2 + assert result[0]["name"] == "disk1" + assert any("Successfully parsed 2 test items" in log["message"] for log in mock_parent.logs) + mock_parent.logs.clear() + raw_data = ['{"name": "disk1", "size": "512"}', {"name": "disk2"}] + result = collector._parse_metadata(raw_data, "test") + assert len(result) == 2 + assert result[0]["name"] == "disk1" + assert result[1]["name"] == "disk2" + raw_data = {"name": "disk1", "size": "512"} + result = collector._parse_metadata(raw_data, "test") + assert len(result) == 1 + assert result[0]["name"] == "disk1" + raw_data = '[{"name": "disk1"}, {"name": "disk2"}]' + result = collector._parse_metadata(raw_data, "test") + assert len(result) == 2 + mock_parent.logs.clear() + raw_data = '{"name": "disk1"}\n{"name": "disk2"}\n' + result = collector._parse_metadata(raw_data, "test") + assert len(result) == 2 + assert result[0]["name"] == "disk1" + + def test_parse_metadata_error_cases(self, collector, mock_parent): + """Test metadata parsing handles empty inputs, invalid JSON, and non-dict items""" + assert collector._parse_metadata(None, "test") == [] + assert collector._parse_metadata("", "test") == [] + assert collector._parse_metadata([], "test") == [] + assert any("empty or None" in log["message"] for log in mock_parent.logs) + mock_parent.logs.clear() + raw_data = ["invalid json {", '{"valid": "json"}'] + result = collector._parse_metadata(raw_data, "test") + assert len(result) == 1 + assert result[0]["valid"] == "json" + assert any("Failed to parse" in log["message"] for log in mock_parent.logs) + raw_data = [{"name": "disk1"}, "string_item", 12345, None] + result = collector._parse_metadata(raw_data, "test") + assert len(result) == 1 + assert result[0]["name"] == "disk1" + + +class TestGatherAllFilesystemInfo: + """Test suite for gather_all_filesystem_info method""" + + def test_gather_all_filesystem_info_complete(self, collector, mock_parent): + 
"""Test comprehensive filesystem info gathering with all data types""" + context = { + "lvm_fullreport": {"report": [{"vg": [], "lv": [], "seg": []}]}, + "azure_disks_metadata": [ + {"name": "disk1", "mbps": 500, "iops": 10000}, + {"name": "disk2", "mbps": 500, "iops": 10000}, + ], + "anf_storage_metadata": [], + "afs_storage_metadata": [], + } + filesystems = [ + { + "target": "/hana/data", + "source": "/dev/mapper/datavg-datalv", + "fstype": "xfs", + "vg": "datavg", + "options": "rw,relatime", + "size": "1T", + "free": "800G", + "used": "200G", + "used_percent": "20%", + } + ] + lvm_volumes = { + "datalv": { + "dm_path": "/dev/mapper/datavg-datalv", + "stripe_size": "256k", + "stripes": "2", + "size": "1024g", + } + } + vg_to_disk_names = {"datavg": ["disk1", "disk2"]} + result = collector.gather_all_filesystem_info( + context, filesystems, lvm_volumes, vg_to_disk_names + ) + assert len(result) == 1 + assert result[0]["target"] == "/hana/data" + assert result[0]["max_mbps"] == 1000 + assert result[0]["max_iops"] == 20000 + assert result[0]["stripe_size"] == "256k" + assert result[0]["disk_count"] == 2 + + def test_gather_all_filesystem_info_nfs_anf(self, collector, mock_parent): + """Test filesystem info gathering for ANF NFS mounts""" + context = { + "lvm_fullreport": {"report": []}, + "azure_disks_metadata": [], + "anf_storage_metadata": [ + { + "ip": "10.0.0.5", + "throughputMibps": 2048, + "serviceLevel": "Ultra", + } + ], + "afs_storage_metadata": [], + } + filesystems = [ + { + "target": "/hana/shared", + "source": "10.0.0.5:/volume1", + "fstype": "nfs4", + "vg": "", + "options": "rw", + "size": "2T", + "free": "1.5T", + "used": "500G", + "used_percent": "25%", + } + ] + result = collector.gather_all_filesystem_info(context, filesystems, {}, {}) + assert len(result) == 1 + assert result[0]["max_mbps"] == 2048 + assert result[0]["max_iops"] == "-" + assert any("Correlated NFS" in log["message"] for log in mock_parent.logs) + + def 
test_gather_all_filesystem_info_direct_disk(self, collector, mock_parent): + """Test filesystem info gathering for direct Azure disk mounts""" + context = { + "lvm_fullreport": {"report": []}, + "azure_disks_metadata": [{"name": "sdc", "mbps": 750, "iops": 20000}], + "anf_storage_metadata": [], + "afs_storage_metadata": [], + } + filesystems = [ + { + "target": "/datadisk", + "source": "/dev/sdc", + "fstype": "ext4", + "vg": "", + "options": "rw", + "size": "512G", + "free": "400G", + "used": "112G", + "used_percent": "22%", + } + ] + result = collector.gather_all_filesystem_info(context, filesystems, {}, {}) + assert len(result) == 1 + assert result[0]["max_mbps"] == 750 + assert result[0]["max_iops"] == 20000 + assert result[0]["disk_count"] == 1 + + def test_gather_all_filesystem_info_error_cases(self, collector, mock_parent): + context = {"lvm_fullreport": ""} + result = collector.gather_all_filesystem_info(context, [], {}, {}) + assert result == [] + assert any("lvm_fullreport is empty" in log["message"] for log in mock_parent.logs) + mock_parent.logs.clear() + mock_parent.errors.clear() + result = collector.gather_all_filesystem_info(None, [], {}, {}) + assert result == [] + assert len(mock_parent.errors) == 1 + + +class TestGatherAzureDisksInfo: + """Test suite for gather_azure_disks_info method""" + + def test_gather_azure_disks_info_complete(self, collector, mock_parent): + """Test Azure disk info gathering with complete metadata""" + context = { + "imds_disks_metadata": [ + { + "lun": "0", + "name": "disk1", + "diskSizeGB": "512", + "storageProfile": {"sku": "Premium_LRS"}, + "caching": "ReadWrite", + "writeAcceleratorEnabled": True, + } + ], + "azure_disks_metadata": [ + { + "name": "disk1", + "size": "512", + "sku": "Premium_LRS", + "iops": 20000, + "mbps": 750, + "tier": "P30", + "encryption": "EncryptionAtRestWithPlatformKey", + } + ], + } + lvm_fullreport = { + "report": [ + { + "pv": [{"pv_name": "/dev/sdc"}], + "vg": [{"vg_name": "datavg"}], + } + 
] + } + device_lun_map = {"sdc": "0"} + result = collector.gather_azure_disks_info(context, lvm_fullreport, device_lun_map) + assert len(result) == 1 + assert result[0]["LUNID"] == "0" + assert result[0]["Name"] == "disk1" + assert result[0]["VolumeGroup"] == "datavg" + assert result[0]["IOPS"] == 20000 + assert result[0]["MBPS"] == 750 + + def test_gather_azure_disks_info_edge_cases(self, collector, mock_parent): + """Test Azure disk info gathering for disks not in VG and exception handling""" + context = { + "imds_disks_metadata": [{"lun": "0", "name": "disk1"}], + "azure_disks_metadata": [{"name": "disk1", "iops": 10000, "mbps": 500}], + } + device_lun_map = {"sdc": "0"} + result = collector.gather_azure_disks_info(context, {"report": []}, device_lun_map) + assert len(result) == 1 + assert result[0]["VolumeGroup"] == "" + context_invalid = {"imds_disks_metadata": None, "azure_disks_metadata": None} + result = collector.gather_azure_disks_info(context_invalid, {}, {}) + assert result == [] + assert isinstance(result, list) + + +class TestGatherLvmGroupsInfo: + """Test suite for gather_lvm_groups_info method""" + + def test_gather_lvm_groups_info_success_and_errors(self, collector, mock_parent): + """Test LVM groups info gathering: success, no disk mapping, and exception handling""" + lvm_groups = { + "datavg": {"name": "datavg", "disks": 2, "logical_volumes": 1, "total_size": "1024g"} + } + vg_to_disk_names = {"datavg": ["disk1", "disk2"]} + azure_disk_data = [ + {"name": "disk1", "iops": 10000, "mbps": 500}, + {"name": "disk2", "iops": 10000, "mbps": 500}, + ] + result = collector.gather_lvm_groups_info(lvm_groups, vg_to_disk_names, azure_disk_data) + assert len(result) == 1 + assert result[0]["Name"] == "datavg" + assert result[0]["TotalIOPS"] == 20000 + assert result[0]["TotalMBPS"] == 1000 + assert result[0]["TotalSize"] == "1024GiB" + lvm_groups_no_mapping = {"testvg": {"name": "testvg", "disks": 1, "total_size": "512g"}} + result = 
collector.gather_lvm_groups_info(lvm_groups_no_mapping, {}, []) + assert len(result) == 1 + assert result[0]["TotalIOPS"] == 0 + mock_parent.logs.clear() + result = collector.gather_lvm_groups_info(None, {}, []) + assert result == [] + assert any("Failed to gather LVM group" in log["message"] for log in mock_parent.logs) + + +class TestGatherLvmVolumesInfo: + """Test suite for gather_lvm_volumes_info method""" + + def test_gather_lvm_volumes_info_success_and_errors(self, collector, mock_parent): + """Test LVM volumes info gathering: success, size conversion, and exception handling""" + lvm_volumes = { + "datalv": { + "name": "datalv", + "vg_name": "datavg", + "path": "/dev/datavg/datalv", + "dm_path": "/dev/mapper/datavg-datalv", + "layout": "linear", + "size": "512g", + "stripe_size": "256k", + "stripes": "2", + } + } + result = collector.gather_lvm_volumes_info(lvm_volumes) + assert len(result) == 1 + assert result[0]["Name"] == "datalv" + assert result[0]["Size"] == "512GiB" + assert result[0]["StripeSize"] == "256k" + assert any( + "Successfully correlated LVM volume" in log["message"] for log in mock_parent.logs + ) + lvm_volumes_tb = { + "loglv": { + "name": "loglv", + "vg_name": "logvg", + "size": "2t", + "stripe_size": "", + "stripes": "", + } + } + result = collector.gather_lvm_volumes_info(lvm_volumes_tb) + assert result[0]["Size"] == "2TiB" + mock_parent.logs.clear() + result = collector.gather_lvm_volumes_info(None) + assert result == [] + assert any("Failed to gather LVM volume" in log["message"] for log in mock_parent.logs) + + +class TestGatherAnfVolumesInfo: + """Test suite for gather_anf_volumes_info method""" + + def test_gather_anf_volumes_info_mounted_only(self, collector, mock_parent): + """Test ANF volumes info only includes mounted volumes""" + filesystems = [ + { + "target": "/hana/shared", + "source": "10.0.0.5:/volume1", + "fstype": "nfs4", + "nfs_type": "ANF", + } + ] + anf_storage_data = [ + { + "ip": "10.0.0.5", + "id": 
"/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.NetApp/" + "netAppAccounts/account1/capacityPools/pool1/volumes/vol1", + "serviceLevel": "Premium", + "throughputMibps": 1024, + "protocolTypes": ["NFSv4.1"], + }, + { + "ip": "10.0.0.6", + "id": "/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.NetApp/" + "netAppAccounts/account1/capacityPools/pool1/volumes/vol2", + "serviceLevel": "Standard", + "throughputMibps": 512, + "protocolTypes": ["NFSv3"], + }, + ] + result = collector.gather_anf_volumes_info(filesystems, anf_storage_data) + assert len(result) == 1 + assert result[0]["VolumeName"] == "vol1" + assert result[0]["PoolName"] == "pool1" + assert result[0]["ThroughputMibps"] == 1024 + assert any("mounted ANF IPs" in log["message"] for log in mock_parent.logs) + + def test_gather_anf_volumes_info_name_fallback(self, collector): + """Test ANF volumes info uses name field as fallback""" + filesystems = [ + { + "target": "/hana/shared", + "source": "10.0.0.5:/volume1", + "fstype": "nfs4", + "nfs_type": "ANF", + } + ] + anf_storage_data = [ + { + "ip": "10.0.0.5", + "name": "account1/pool1/vol1", + "serviceLevel": "Premium", + "throughputMibps": 1024, + "protocolTypes": ["NFSv4.1", "NFSv3"], + } + ] + result = collector.gather_anf_volumes_info(filesystems, anf_storage_data) + assert len(result) == 1 + assert result[0]["VolumeName"] == "vol1" + assert result[0]["PoolName"] == "pool1" + assert "NFSv4.1, NFSv3" in result[0]["ProtocolTypes"] + + def test_gather_anf_volumes_info_edge_cases(self, collector, mock_parent): + """Test ANF volumes info: non-ANF filesystems filtering and exception handling""" + filesystems = [{"target": "/data", "source": "/dev/sdc", "fstype": "xfs"}] + anf_storage_data = [{"ip": "10.0.0.5", "name": "vol1"}] + result = collector.gather_anf_volumes_info(filesystems, anf_storage_data) + assert len(result) == 0 + result = collector.gather_anf_volumes_info(None, []) + assert result == [] + assert any("Failed to gather ANF volume" 
in log["message"] for log in mock_parent.logs) + + +class TestCollectMethod: + """Test suite for the main collect method""" + + def test_collect_complete_success(self, collector, mock_parent): + """Test complete collection workflow with all data types""" + context = { + "lvm_fullreport": { + "report": [ + { + "vg": [ + { + "vg_name": "datavg", + "pv_count": "1", + "lv_count": "1", + "vg_size": "512g", + } + ], + "lv": [ + { + "lv_name": "datalv", + "vg_name": "datavg", + "lv_path": "/dev/datavg/datalv", + "lv_dm_path": "/dev/mapper/datavg-datalv", + "lv_layout": "linear", + "lv_size": "512g", + "lv_uuid": "uuid1", + } + ], + "seg": [{"lv_uuid": "uuid1", "stripes": "1", "stripe_size": "64k"}], + } + ] + }, + "mount_info": "/hana/data /dev/mapper/datavg-datalv xfs rw,relatime", + "df_info": ( + "Filesystem 1K-blocks Used Available Use% Mounted\n" + "/dev/mapper/datavg-datalv 524288000 52428800 471859200 10% /hana/data" + ), + "azure_disks_metadata": [{"name": "disk1", "mbps": 500, "iops": 10000}], + "anf_storage_metadata": [], + "afs_storage_metadata": [], + "imds_disks_metadata": [{"lun": "0", "name": "disk1"}], + "device_lun_map": {"sdc": "0"}, + } + result = collector.collect(MockCheck(), context) + assert isinstance(result, dict) + assert "filesystems" in result + assert "lvm_volumes" in result + assert "lvm_groups" in result + assert "formatted_filesystem_info" in result + assert "azure_disks_info" in result + assert "lvm_groups_info" in result + assert "lvm_volumes_info" in result + assert "anf_volumes_info" in result + assert len(result["filesystems"]) > 0 + + def test_collect_empty_lvm_fullreport(self, collector, mock_parent): + """Test collect handles empty lvm_fullreport""" + context = { + "lvm_fullreport": "", + "mount_info": "/data /dev/sdc xfs rw", + "df_info": "Filesystem 1K-blocks Used Available Use% Mounted\n/dev/sdc 524288000 52428800 471859200 10% /data", + } + result = collector.collect(MockCheck(), context) + assert any( + "lvm_fullreport is empty 
or invalid" in log["message"] for log in mock_parent.logs + ) + + def test_collect_error_and_logging_scenarios(self, collector, mock_parent): + """Test collect handles various error scenarios and provides comprehensive logging""" + context = { + "lvm_fullreport": {"report": []}, + "mount_info": "", + "df_info": "Filesystem 1K-blocks Used Available Use% Mounted", + "imds_disks_metadata": [{"lun": "0", "name": "disk1"}], + } + result = collector.collect(MockCheck(), context) + assert any("device_lun_map not found" in log["message"] for log in mock_parent.logs) + mock_parent.logs.clear() + mock_parent.errors.clear() + result = collector.collect(MockCheck(), None) + assert isinstance(result, dict) + assert any("ERROR:" in key for key in result.keys()) + assert len(mock_parent.errors) == 1 + mock_parent.logs.clear() + context_anf = { + "lvm_fullreport": {"report": []}, + "mount_info": "", + "df_info": "Filesystem 1K-blocks Used Available Use% Mounted", + "anf_storage_metadata": [{"ip": "10.0.0.5"}], + } + result = collector.collect(MockCheck(), context_anf) + assert any("Raw ANF data type" in log["message"] for log in mock_parent.logs) + mock_parent.logs.clear() + context_full = { + "lvm_fullreport": {"report": []}, + "mount_info": "/data /dev/sdc xfs rw", + "df_info": "Filesystem 1K-blocks Used Available Use% Mounted\n/dev/sdc 1024000 102400 921600 10% /data", + "azure_disks_metadata": [], + "anf_storage_metadata": [], + "afs_storage_metadata": [], + "imds_disks_metadata": [], + } + result = collector.collect(MockCheck(), context_full) + logged_messages = [log["message"] for log in mock_parent.logs] + assert any("findmnt_output" in msg for msg in logged_messages) + assert any("df_output" in msg for msg in logged_messages) + assert any("Azure disk data type" in msg for msg in logged_messages) diff --git a/tests/modules/configuration_check_module_test.py b/tests/modules/configuration_check_module_test.py new file mode 100644 index 00000000..a7808112 --- /dev/null +++ 
b/tests/modules/configuration_check_module_test.py @@ -0,0 +1,992 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +Unit tests for the ConfigurationCheckModule. + +This test suite provides comprehensive coverage for configuration check execution, +validation, parallel processing, and error handling. +Tests use pytest with monkeypatch for mocking, avoiding unittest entirely. +""" + +import json +import logging +from datetime import datetime +from typing import Any, Dict, Optional +from unittest.mock import Mock, MagicMock, patch + +import pytest + +from src.modules.configuration_check_module import ConfigurationCheckModule +from src.module_utils.enums import ( + TestStatus, + TestSeverity, + Check, + CheckResult, + ApplicabilityRule, +) + + +class MockAnsibleModule: + """ + Mock Ansible module for testing ConfigurationCheckModule. + + Simulates the AnsibleModule interface with params, exit_json, and fail_json. + """ + + def __init__(self, params: Optional[Dict[str, Any]] = None): + self.params = params or { + "check_file_content": "", + "context": {}, + "filter_tags": None, + "filter_categories": None, + "parallel_execution": False, + "max_workers": 3, + "enable_retry": False, + "workspace_directory": "/tmp/workspace", + "hostname": None, + "test_group_invocation_id": "test-id", + "test_group_name": "test-group", + "azure_resources": {}, + } + self.exit_calls = [] + self.fail_calls = [] + + def exit_json(self, **kwargs): + """Mock exit_json to capture successful exits""" + self.exit_calls.append(kwargs) + + def fail_json(self, **kwargs): + """Mock fail_json to capture failure exits""" + self.fail_calls.append(kwargs) + + +@pytest.fixture +def mock_ansible_module(): + """Fixture to provide a fresh MockAnsibleModule instance""" + return MockAnsibleModule() + + +@pytest.fixture +def config_module(mock_ansible_module): + """Fixture to provide a ConfigurationCheckModule instance""" + return ConfigurationCheckModule(mock_ansible_module) 
+ + +@pytest.fixture +def sample_check(): + """Fixture to provide a sample Check object""" + return Check( + id="test_check_001", + name="Test Check", + description="A test check for validation", + category="System", + workload="SAP", + severity=TestSeverity.WARNING, + collector_type="command", + collector_args={"command": "echo test"}, + validator_type="string", + validator_args={"expected": "test"}, + tags=["test", "system"], + applicability=[], + references={}, + report="check", + ) + + +class TestConfigurationCheckModuleInit: + """Test suite for ConfigurationCheckModule initialization""" + + def test_initialization(self, mock_ansible_module): + """Test ConfigurationCheckModule initializes properly""" + module = ConfigurationCheckModule(mock_ansible_module) + assert module.module == mock_ansible_module + assert module.module_params == mock_ansible_module.params + assert module.checks == [] + assert module.hostname is None + assert module.context == {} + assert "check_results" in module.result + assert len(module._collector_registry) > 0 + assert len(module._validator_registry) > 0 + + def test_collector_registry_initialization(self, config_module): + """Test collector registry contains expected collectors""" + registry = config_module._collector_registry + assert "command" in registry + assert "azure" in registry + assert "module" in registry + + def test_validator_registry_initialization(self, config_module): + """Test validator registry contains expected validators""" + registry = config_module._validator_registry + assert "string" in registry + assert "range" in registry + assert "list" in registry + assert "check_support" in registry + assert "properties" in registry + + +class TestSetContext: + """Test suite for set_context method""" + + def test_set_context_with_hostname(self, config_module): + """Test setting context with hostname""" + context = {"hostname": "testhost", "os": "SLES", "version": "15.3"} + config_module.set_context(context) + assert 
config_module.context == context + assert config_module.hostname == "testhost" + + def test_set_context_without_hostname(self, config_module): + """Test setting context without hostname""" + context = {"os": "RHEL", "version": "8.4"} + config_module.set_context(context) + assert config_module.context == context + assert config_module.hostname is None + + +class TestLoadChecks: + """Test suite for load_checks method""" + + def test_load_checks_from_yaml_string(self, config_module): + """Test loading checks from YAML string""" + yaml_content = """ +checks: + - id: check_001 + name: Test Check + description: Test description + category: System + severity: WARNING + collector_type: command + collector_args: + command: "echo test" + validator_type: string + validator_args: + expected: "test" + tags: + - test + applicability: + os: SLES +""" + config_module.load_checks(yaml_content) + assert len(config_module.checks) == 1 + assert config_module.checks[0].id == "check_001" + assert config_module.checks[0].name == "Test Check" + assert len(config_module.checks[0].applicability) == 1 + + def test_load_checks_with_multiple_checks(self, config_module): + """Test loading multiple checks""" + yaml_content = """ +checks: + - id: check_001 + name: First Check + category: System + severity: INFO + collector_type: command + - id: check_002 + name: Second Check + category: Network + severity: CRITICAL + collector_type: azure +""" + config_module.load_checks(yaml_content) + assert len(config_module.checks) == 2 + assert config_module.checks[0].id == "check_001" + assert config_module.checks[1].id == "check_002" + + def test_load_checks_empty_content(self, config_module, monkeypatch): + """Test loading checks with empty content""" + config_module.load_checks("") + assert len(config_module.checks) == 0 + + +class TestIsCheckApplicable: + """Test suite for is_check_applicable method""" + + def test_check_applicable_no_rules(self, config_module, sample_check): + """Test check with no 
applicability rules is always applicable""" + config_module.set_context({"os": "SLES"}) + sample_check.applicability = [] + assert config_module.is_check_applicable(sample_check) is True + + def test_check_applicable_matching_rule(self, config_module, sample_check): + """Test check with matching applicability rule""" + config_module.set_context({"os": "SLES", "version": "15.3"}) + sample_check.applicability = [ApplicabilityRule(property="os", value="SLES")] + assert config_module.is_check_applicable(sample_check) is True + + def test_check_not_applicable_non_matching_rule(self, config_module, sample_check): + """Test check with non-matching applicability rule""" + config_module.set_context({"os": "RHEL"}) + sample_check.applicability = [ApplicabilityRule(property="os", value="SLES")] + assert config_module.is_check_applicable(sample_check) is False + + def test_check_applicable_multiple_rules_all_match(self, config_module, sample_check): + """Test check with multiple rules that all match""" + config_module.set_context({"os": "SLES", "role": "db"}) + sample_check.applicability = [ + ApplicabilityRule(property="os", value="SLES"), + ApplicabilityRule(property="role", value="db"), + ] + assert config_module.is_check_applicable(sample_check) is True + + +class TestValidators: + """Test suite for validation methods""" + + def test_validate_string_success(self, config_module, sample_check): + """Test string validation with matching values""" + sample_check.validator_args = {"expected": "test_value"} + result = config_module.validate_string(sample_check, "test_value") + assert result["status"] == TestStatus.SUCCESS.value + + def test_validate_string_failure(self, config_module, sample_check): + """Test string validation with non-matching values""" + sample_check.validator_args = {"expected": "expected_value"} + sample_check.severity = TestSeverity.WARNING + result = config_module.validate_string(sample_check, "actual_value") + assert result["status"] == 
TestStatus.WARNING.value + + def test_validate_string_case_insensitive(self, config_module, sample_check): + """Test case-insensitive string validation""" + sample_check.validator_args = {"expected": "TEST", "case_insensitive": True} + result = config_module.validate_string(sample_check, "test") + assert result["status"] == TestStatus.SUCCESS.value + + def test_validate_string_whitespace_handling(self, config_module, sample_check): + """Test string validation with whitespace handling""" + sample_check.validator_args = {"expected": "test value", "strip_whitespace": True} + result = config_module.validate_string(sample_check, " test value ") + assert result["status"] == TestStatus.SUCCESS.value + + def test_validate_numeric_range_within_bounds(self, config_module, sample_check): + """Test numeric range validation within bounds""" + sample_check.validator_args = {"min": 10, "max": 100} + result = config_module.validate_numeric_range(sample_check, "50") + assert result["status"] == TestStatus.SUCCESS.value + + def test_validate_numeric_range_out_of_bounds(self, config_module, sample_check): + """Test numeric range validation out of bounds""" + sample_check.validator_args = {"min": 10, "max": 100} + sample_check.severity = TestSeverity.CRITICAL + result = config_module.validate_numeric_range(sample_check, "150") + assert result["status"] == TestStatus.ERROR.value + + def test_validate_numeric_range_invalid_input(self, config_module, sample_check): + """Test numeric range validation with invalid input""" + sample_check.validator_args = {"min": 10, "max": 100} + result = config_module.validate_numeric_range(sample_check, "not_a_number") + assert result["status"] == TestStatus.ERROR.value + + def test_validate_list_contains_match(self, config_module, sample_check): + """Test list validation with matching item""" + sample_check.validator_args = {"valid_list": ["item1", "item2", "item3"]} + result = config_module.validate_list(sample_check, "item2") + assert result["status"] 
== TestStatus.SUCCESS.value + + def test_validate_list_no_match(self, config_module, sample_check): + """Test list validation with no matching items""" + sample_check.validator_args = {"valid_list": ["item1", "item2"]} + sample_check.severity = TestSeverity.WARNING + result = config_module.validate_list(sample_check, "item3, item4") + assert result["status"] == TestStatus.WARNING.value + + def test_validate_properties_success(self, config_module, sample_check): + """Test properties validation with matching properties""" + sample_check.validator_args = { + "properties": [ + {"property": "cpu", "value": "4"}, + {"property": "memory", "value": "16GB"}, + ] + } + collected = json.dumps({"cpu": "4", "memory": "16GB", "disk": "100GB"}) + result = config_module.validate_properties(sample_check, collected) + assert result["status"] == TestStatus.SUCCESS.value + + def test_validate_properties_failure(self, config_module, sample_check): + """Test properties validation with missing properties""" + sample_check.validator_args = {"properties": [{"property": "cpu", "value": "8"}]} + sample_check.severity = TestSeverity.CRITICAL + collected = json.dumps({"cpu": "4"}) + result = config_module.validate_properties(sample_check, collected) + assert result["status"] == TestStatus.ERROR.value + + def test_validate_properties_invalid_json(self, config_module, sample_check): + """Test properties validation with invalid JSON""" + sample_check.validator_args = {"properties": []} + result = config_module.validate_properties(sample_check, "invalid json") + assert result["status"] == TestStatus.ERROR.value + + def test_validate_vm_support_success(self, config_module, sample_check): + """Test VM support validation with supported configuration""" + config_module.set_context( + { + "role": "db", + "database_type": "HANA", + "supported_configurations": { + "VMs": {"Standard_M32ts": {"db": {"SupportedDB": ["HANA", "DB2"]}}} + }, + } + ) + sample_check.validator_args = {"validation_rules": "VMs"} + 
result = config_module.validate_vm_support(sample_check, "Standard_M32ts") + assert result["status"] == TestStatus.SUCCESS.value + + def test_validate_vm_support_unsupported(self, config_module, sample_check): + """Test VM support validation with unsupported configuration""" + config_module.set_context( + { + "role": "db", + "database_type": "Oracle", + "supported_configurations": { + "VMs": {"Standard_M32ts": {"db": {"SupportedDB": ["HANA"]}}} + }, + } + ) + sample_check.validator_args = {"validation_rules": "VMs"} + result = config_module.validate_vm_support(sample_check, "Standard_M32ts") + assert result["status"] == TestStatus.ERROR.value + + +class TestValidateResult: + """Test suite for validate_result method""" + + def test_validate_result_with_registered_validator(self, config_module, sample_check): + """Test validate_result with registered validator""" + sample_check.validator_type = "string" + sample_check.validator_args = {"expected": "test"} + result = config_module.validate_result(sample_check, "test") + assert "status" in result + assert result["status"] == TestStatus.SUCCESS.value + + def test_validate_result_with_unregistered_validator(self, config_module, sample_check): + """Test validate_result with unregistered validator""" + sample_check.validator_type = "unknown_validator" + result = config_module.validate_result(sample_check, "data") + assert result["status"] == TestStatus.ERROR.value + assert "not found" in result["details"] + + +class TestExecuteCheck: + """Test suite for execute_check method""" + + def test_execute_check_success(self, config_module, sample_check, monkeypatch): + """Test successful check execution""" + config_module.set_context({"hostname": "testhost"}) + + def mock_collect(check, context): + return "test" + + with patch("src.module_utils.collector.CommandCollector.collect", side_effect=mock_collect): + result = config_module.execute_check(sample_check) + assert isinstance(result, CheckResult) + assert result.status == 
TestStatus.SUCCESS.value + assert result.hostname == "testhost" + + def test_execute_check_not_applicable(self, config_module, sample_check): + """Test check execution when check is not applicable""" + config_module.set_context({"os": "RHEL"}) + sample_check.applicability = [ApplicabilityRule(property="os", value="SLES")] + result = config_module.execute_check(sample_check) + assert result.status == TestStatus.SKIPPED.value + assert "not applicable" in result.details + + def test_execute_check_info_severity(self, config_module, sample_check, monkeypatch): + """Test check execution with INFO severity""" + config_module.set_context({"hostname": "testhost"}) + sample_check.severity = TestSeverity.INFO + + def mock_collect(check, context): + return "info_data" + + with patch("src.module_utils.collector.CommandCollector.collect", side_effect=mock_collect): + result = config_module.execute_check(sample_check) + assert result.status == TestStatus.INFO.value + + def test_execute_check_collector_not_found(self, config_module, sample_check): + """Test check execution with unknown collector""" + config_module.set_context({"hostname": "testhost"}) + sample_check.collector_type = "unknown_collector" + result = config_module.execute_check(sample_check) + assert result.status == TestStatus.ERROR.value + assert "not found" in result.details + + def test_execute_check_exception_handling(self, config_module, sample_check): + """Test check execution handles exceptions""" + config_module.set_context({"hostname": "testhost"}) + + def mock_collect_error(check, context): + raise Exception("Collection failed") + + with patch( + "src.module_utils.collector.CommandCollector.collect", side_effect=mock_collect_error + ): + result = config_module.execute_check(sample_check) + assert result.status == TestStatus.ERROR.value + assert "Error" in result.details + + +class TestExecuteCheckWithRetry: + """Test suite for execute_check_with_retry method""" + + def 
test_execute_check_with_retry_success_first_attempt(self, config_module, sample_check): + """Test retry mechanism succeeds on first attempt""" + config_module.set_context({"hostname": "testhost"}) + + def mock_collect(check, context): + return "test" + + with patch("src.module_utils.collector.CommandCollector.collect", side_effect=mock_collect): + result = config_module.execute_check_with_retry(sample_check, max_retries=3) + assert result.status == TestStatus.SUCCESS.value + + def test_execute_check_with_retry_eventual_success(self, config_module, sample_check): + """Test retry mechanism succeeds on first attempt (no retry needed)""" + config_module.set_context({"hostname": "testhost"}) + + def mock_collect(check, context): + return "test" + + with patch("src.module_utils.collector.CommandCollector.collect", side_effect=mock_collect): + with patch("time.sleep"): # Skip actual sleep + result = config_module.execute_check_with_retry(sample_check, max_retries=3) + assert result.status == TestStatus.SUCCESS.value + + def test_execute_check_with_retry_all_attempts_fail(self, config_module, sample_check): + """Test retry mechanism fails after all attempts""" + config_module.set_context({"hostname": "testhost"}) + + def mock_collect_error(check, context): + raise Exception("Persistent failure") + + with patch( + "src.module_utils.collector.CommandCollector.collect", side_effect=mock_collect_error + ): + with patch("time.sleep"): # Skip actual sleep + result = config_module.execute_check_with_retry(sample_check, max_retries=3) + assert result.status == TestStatus.ERROR.value + assert result.details is not None + assert "Error" in result.details or "failure" in result.details + + +class TestBuildExecutionOrder: + """Test suite for build_execution_order method""" + + def test_build_execution_order_no_dependencies(self, config_module): + """Test building execution order with no dependencies""" + checks = [ + Check( + id="check1", + name="Check 1", + description="Test check 
1", + category="System", + workload="SAP", + severity=TestSeverity.INFO, + ), + Check( + id="check2", + name="Check 2", + description="Test check 2", + category="System", + workload="SAP", + severity=TestSeverity.INFO, + ), + ] + batches = config_module.build_execution_order(checks) + assert len(batches) == 1 + assert len(batches[0]) == 2 + + def test_build_execution_order_simple(self, config_module): + """Test building execution order returns all checks in single batch""" + check1 = Check( + id="check1", + name="Check 1", + description="Test check 1", + category="System", + workload="SAP", + severity=TestSeverity.INFO, + ) + check2 = Check( + id="check2", + name="Check 2", + description="Test check 2", + category="System", + workload="SAP", + severity=TestSeverity.INFO, + ) + + batches = config_module.build_execution_order([check1, check2]) + assert len(batches) >= 1 + total_checks = sum(len(batch) for batch in batches) + assert total_checks == 2 + + +class TestExecuteChecks: + """Test suite for execute_checks method""" + + def test_execute_checks_sequential(self, config_module): + """Test sequential check execution""" + config_module.set_context({"hostname": "testhost"}) + yaml_content = """ +checks: + - id: check_001 + name: Test Check + category: System + severity: INFO + collector_type: command + collector_args: + command: "echo test" + validator_type: string + validator_args: + expected: "test" +""" + config_module.load_checks(yaml_content) + + with patch("src.module_utils.collector.CommandCollector.collect", return_value="test"): + results = config_module.execute_checks(parallel=False) + assert len(results) == 1 + assert results[0].status == TestStatus.INFO.value + + def test_execute_checks_with_tag_filter(self, config_module): + """Test check execution with tag filtering""" + config_module.set_context({"hostname": "testhost"}) + yaml_content = """ +checks: + - id: check_001 + name: Check 1 + tags: [production, system] + - id: check_002 + name: Check 2 + 
tags: [development, network] +""" + config_module.load_checks(yaml_content) + + with patch("src.module_utils.collector.CommandCollector.collect", return_value="test"): + results = config_module.execute_checks(filter_tags=["production"]) + assert len(results) == 1 + assert results[0].check.id == "check_001" + + def test_execute_checks_with_category_filter(self, config_module): + """Test check execution with category filtering""" + config_module.set_context({"hostname": "testhost"}) + yaml_content = """ +checks: + - id: check_001 + name: Check 1 + category: System + - id: check_002 + name: Check 2 + category: Network +""" + config_module.load_checks(yaml_content) + + with patch("src.module_utils.collector.CommandCollector.collect", return_value="test"): + results = config_module.execute_checks(filter_categories=["System"]) + assert len(results) == 1 + assert results[0].check.category == "System" + + def test_execute_checks_no_matching_filters(self, config_module): + """Test check execution with filters that match nothing""" + config_module.set_context({"hostname": "testhost"}) + yaml_content = """ +checks: + - id: check_001 + name: Check 1 + tags: [production] +""" + config_module.load_checks(yaml_content) + results = config_module.execute_checks(filter_tags=["nonexistent"]) + assert len(results) == 0 + + +class TestGetResultsSummary: + """Test suite for get_results_summary method""" + + def test_get_results_summary_empty(self, config_module): + """Test summary with no results""" + summary = config_module.get_results_summary() + assert summary["total"] == 0 + assert summary["passed"] == 0 + assert summary["failed"] == 0 + + def test_get_results_summary_with_results(self, config_module, sample_check): + """Test summary with mixed results""" + # Create mock result objects with status as string values for summary + result1 = Mock() + result1.status = TestStatus.SUCCESS.value + result1.check = sample_check + + result2 = Mock() + result2.status = TestStatus.ERROR.value + 
result2.check = sample_check + + result3 = Mock() + result3.status = TestStatus.WARNING.value + result3.check = sample_check + + config_module.result["check_results"] = [result1, result2, result3] + + summary = config_module.get_results_summary() + assert summary["total"] == 3 + assert summary["passed"] == 1 + assert summary["failed"] == 1 + assert summary["warnings"] == 1 + + +class TestFormatResultsForHtmlReport: + """Test suite for format_results_for_html_report method""" + + def test_format_results_removes_context_templates(self, config_module, sample_check): + """Test that CONTEXT templates are neutralized in formatted results""" + sample_check.collector_args = {"command": "echo {{ CONTEXT.hostname }}"} + config_module.result["check_results"] = [ + CheckResult( + check=sample_check, + status=TestStatus.SUCCESS, + hostname="test", + expected_value="", + actual_value="", + execution_time=0, + timestamp=datetime.now(), + ) + ] + config_module.format_results_for_html_report() + formatted = config_module.result["check_results"][0] + assert "{{ CONTEXT" not in str(formatted["check"]["collector_args"]) + assert "<" in str(formatted["check"]["collector_args"]) + + def test_format_results_serialization(self, config_module, sample_check): + """Test that results are properly serialized for HTML""" + config_module.result["check_results"] = [ + CheckResult( + check=sample_check, + status=TestStatus.SUCCESS, + hostname="testhost", + expected_value="expected", + actual_value="actual", + execution_time=10, + timestamp=datetime.now(), + ) + ] + config_module.format_results_for_html_report() + result = config_module.result["check_results"][0] + assert isinstance(result, dict) + assert "check" in result + assert "status" in result + assert result["hostname"] == "testhost" + + +class TestRunMethod: + """Test suite for the main run method""" + + def test_run_successful_execution(self, mock_ansible_module): + """Test successful run with valid checks""" + 
mock_ansible_module.params.update( + { + "check_file_content": """ +checks: + - id: check_001 + name: Test Check + severity: INFO + collector_type: command + collector_args: + command: "echo test" + validator_type: string + validator_args: + expected: "test" +""", + "context": {"hostname": "testhost", "os": "SLES"}, + } + ) + + module = ConfigurationCheckModule(mock_ansible_module) + + with patch("src.module_utils.collector.CommandCollector.collect", return_value="test"): + module.run() + + assert len(mock_ansible_module.exit_calls) == 1 + result = mock_ansible_module.exit_calls[0] + assert "check_results" in result + assert "summary" in result + + def test_run_no_check_content(self, mock_ansible_module): + """Test run fails with no check content""" + mock_ansible_module.params["check_file_content"] = None + mock_ansible_module.params["context"] = {"hostname": "testhost"} + + module = ConfigurationCheckModule(mock_ansible_module) + module.run() + + assert len(mock_ansible_module.fail_calls) == 1 + assert "No check file content" in mock_ansible_module.fail_calls[0]["msg"] + + def test_run_exception_handling(self, mock_ansible_module): + """Test run handles exceptions gracefully""" + mock_ansible_module.params.update( + { + "check_file_content": "invalid: yaml: content:", + "context": {"hostname": "testhost"}, + } + ) + + module = ConfigurationCheckModule(mock_ansible_module) + + with patch.object(module, "parse_yaml_from_content", side_effect=Exception("Parse error")): + module.run() + + assert len(mock_ansible_module.fail_calls) == 1 + assert "failed" in mock_ansible_module.fail_calls[0]["msg"] + + +class TestCreateValidationResult: + """Test suite for _create_validation_result method""" + + def test_create_validation_result_success(self, config_module): + """Test validation result creation for success""" + result = config_module._create_validation_result(TestSeverity.WARNING, True) + assert result == TestStatus.SUCCESS.value + + def 
test_create_validation_result_failure_severity_mapping(self, config_module): + """Test validation result maps severity to status on failure""" + assert ( + config_module._create_validation_result(TestSeverity.INFO, False) + == TestStatus.INFO.value + ) + assert ( + config_module._create_validation_result(TestSeverity.WARNING, False) + == TestStatus.WARNING.value + ) + assert ( + config_module._create_validation_result(TestSeverity.CRITICAL, False) + == TestStatus.ERROR.value + ) + + +class TestExecuteChecksParallel: + """Test suite for execute_checks_parallel method""" + + def test_execute_checks_parallel_basic(self, config_module): + """Test parallel execution with basic checks""" + config_module.set_context({"hostname": "testhost"}) + yaml_content = """ +checks: + - id: check_001 + name: Test Check 1 + category: System + severity: INFO + collector_type: command + collector_args: + command: "echo test1" + validator_type: string + validator_args: + expected: "test1" + - id: check_002 + name: Test Check 2 + category: Network + severity: INFO + collector_type: command + collector_args: + command: "echo test2" + validator_type: string + validator_args: + expected: "test2" +""" + config_module.load_checks(yaml_content) + + with patch("src.module_utils.collector.CommandCollector.collect") as mock_collect: + mock_collect.side_effect = lambda check, context: f"test{check.id[-1]}" + results = config_module.execute_checks_parallel(max_workers=2, enable_retry=False) + assert len(results) == 2 + + def test_execute_checks_parallel_with_retry_enabled(self, config_module): + """Test parallel execution with retry enabled""" + config_module.set_context({"hostname": "testhost"}) + yaml_content = """ +checks: + - id: check_001 + name: Test Check + category: System + severity: INFO + collector_type: command +""" + config_module.load_checks(yaml_content) + + with patch("src.module_utils.collector.CommandCollector.collect", return_value="test"): + results = 
config_module.execute_checks_parallel(max_workers=1, enable_retry=True) + assert len(results) == 1 + + def test_execute_checks_parallel_no_checks_after_filter(self, config_module): + """Test parallel execution with filters that match nothing""" + config_module.set_context({"hostname": "testhost"}) + yaml_content = """ +checks: + - id: check_001 + name: Test Check + tags: [production] +""" + config_module.load_checks(yaml_content) + results = config_module.execute_checks_parallel(filter_tags=["nonexistent"]) + assert len(results) == 0 + + def test_execute_checks_parallel_execution_summary(self, config_module): + """Test parallel execution updates result with execution summary""" + config_module.set_context({"hostname": "testhost"}) + yaml_content = """ +checks: + - id: check_001 + name: Test Check + category: System + severity: INFO +""" + config_module.load_checks(yaml_content) + + with patch("src.module_utils.collector.CommandCollector.collect", return_value="test"): + config_module.execute_checks_parallel(max_workers=1) + assert "execution_summary" in config_module.result + assert "total_checks" in config_module.result["execution_summary"] + assert "execution_time" in config_module.result["execution_summary"] + + +class TestParseYamlFromContent: + """Test suite for parse_yaml_from_content method""" + + def test_parse_yaml_from_content_valid(self, config_module): + """Test parsing valid YAML content""" + yaml_str = """ +checks: + - id: test + name: Test + description: Test description +""" + parsed = config_module.parse_yaml_from_content(yaml_str) + assert "checks" in parsed + assert isinstance(parsed["checks"], list) + + def test_parse_yaml_from_content_with_applicability_list(self, config_module): + """Test parsing YAML with applicability as list""" + yaml_str = """ +checks: + - id: test + name: Test + description: Test + category: System + workload: SAP + applicability: + - property: os + value: SLES + - property: version + value: "15" +""" + parsed = 
config_module.parse_yaml_from_content(yaml_str) + assert "checks" in parsed + check_data = parsed["checks"][0] + assert "applicability" in check_data + + +class TestCollectorRegistration: + """Test suite for collector registration""" + + def test_register_custom_collector(self, config_module): + """Test registering a custom collector""" + + class CustomCollector: + pass + + config_module._collector_registry["custom"] = CustomCollector + assert "custom" in config_module._collector_registry + assert config_module._collector_registry["custom"] == CustomCollector + + def test_execute_check_with_custom_collector(self, config_module, sample_check): + """Test executing check with unregistered collector type""" + config_module.set_context({"hostname": "testhost"}) + sample_check.collector_type = "unregistered_type" + result = config_module.execute_check(sample_check) + assert result.status == TestStatus.ERROR.value + + +class TestEdgeCases: + """Test suite for edge cases and error conditions""" + + def test_execute_check_with_empty_context(self, config_module, sample_check): + """Test executing check with empty context""" + config_module.set_context({}) + + with patch("src.module_utils.collector.CommandCollector.collect", return_value="test"): + result = config_module.execute_check(sample_check) + assert isinstance(result, CheckResult) + + def test_validate_string_with_none_collected_data(self, config_module, sample_check): + """Test string validation with None collected data""" + sample_check.validator_args = {"expected": "test"} + result = config_module.validate_string(sample_check, None) + assert result["status"] in [TestStatus.WARNING.value, TestStatus.ERROR.value] + + def test_validate_numeric_range_with_min_only(self, config_module, sample_check): + """Test numeric range validation with only min specified""" + sample_check.validator_args = {"min": 10} + result = config_module.validate_numeric_range(sample_check, "50") + assert result["status"] == 
TestStatus.SUCCESS.value + + def test_validate_numeric_range_with_max_only(self, config_module, sample_check): + """Test numeric range validation with only max specified""" + sample_check.validator_args = {"max": 100} + result = config_module.validate_numeric_range(sample_check, "50") + assert result["status"] == TestStatus.SUCCESS.value + + def test_validate_list_with_empty_list(self, config_module, sample_check): + """Test list validation with empty valid list""" + sample_check.validator_args = {"valid_list": []} + sample_check.severity = TestSeverity.WARNING + result = config_module.validate_list(sample_check, "any_value") + assert result["status"] == TestStatus.WARNING.value + + def test_is_check_applicable_missing_context_property(self, config_module, sample_check): + """Test applicability check with missing context property""" + config_module.set_context({"os": "SLES"}) + sample_check.applicability = [ApplicabilityRule(property="missing_prop", value="value")] + try: + result = config_module.is_check_applicable(sample_check) + # If no exception, result should be False + assert result is False + except KeyError: + # Expected behavior when property is missing + pass + + def test_validate_properties_with_partial_match(self, config_module, sample_check): + """Test properties validation with some matching and some non-matching""" + sample_check.validator_args = { + "properties": [ + {"property": "cpu", "value": "4"}, + {"property": "memory", "value": "32GB"}, + ] + } + sample_check.severity = TestSeverity.WARNING + collected = json.dumps({"cpu": "4", "memory": "16GB"}) + result = config_module.validate_properties(sample_check, collected) + assert result["status"] in [TestStatus.WARNING.value, TestStatus.ERROR.value] + + def test_execute_checks_with_multiple_filters(self, config_module): + """Test check execution with both tag and category filters""" + config_module.set_context({"hostname": "testhost"}) + yaml_content = """ +checks: + - id: check_001 + name: Check 1 
+ category: System + tags: [production, critical] + - id: check_002 + name: Check 2 + category: Network + tags: [production] + - id: check_003 + name: Check 3 + category: System + tags: [development] +""" + config_module.load_checks(yaml_content) + + with patch("src.module_utils.collector.CommandCollector.collect", return_value="test"): + results = config_module.execute_checks( + filter_tags=["production"], filter_categories=["System"] + ) + assert len(results) == 1 + assert results[0].check.id == "check_001" From cf520976580bfde969c766bb74ac610407c7e615 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Tue, 21 Oct 2025 20:55:13 +0000 Subject: [PATCH 02/17] Enhance HAClusterValidator to support multiple DR provider configurations and update related tests --- src/modules/get_pcmk_properties_db.py | 77 ++++++------ .../ha_db_hana/tasks/files/constants.yaml | 107 ++++++++++++----- tests/modules/get_pcmk_properties_db_test.py | 111 ++++++++++++++++-- 3 files changed, 217 insertions(+), 78 deletions(-) diff --git a/src/modules/get_pcmk_properties_db.py b/src/modules/get_pcmk_properties_db.py index b5e5c133..7e6c4cc8 100644 --- a/src/modules/get_pcmk_properties_db.py +++ b/src/modules/get_pcmk_properties_db.py @@ -273,45 +273,44 @@ def _parse_global_ini_parameters(self): ) as file: global_ini_content = file.read().splitlines() - section_start = ( - global_ini_content.index("[ha_dr_provider_sushanasr]") - if self.saphanasr_provider == HanaSRProvider.ANGI - else global_ini_content.index("[ha_dr_provider_SAPHanaSR]") - ) - properties_slice = global_ini_content[section_start + 1 : section_start + 4] - - global_ini_properties = { - key.strip(): val.strip() - for line in properties_slice - for key, sep, val in [line.partition("=")] - if sep - } - - for param_name, expected_config in global_ini_defaults.items(): - value = global_ini_properties.get(param_name, "") - if isinstance(expected_config, dict): - expected_value = expected_config.get("value") - is_required = 
expected_config.get("required", False) - else: - expected_value = expected_config - is_required = False - - self.log( - logging.INFO, - f"param_name: {param_name}, value: {value}, expected_value: {expected_config}", - ) - parameters.append( - self._create_parameter( - category="global_ini", - name=param_name, - value=value, - expected_value=( - expected_config.get("value") - if isinstance(expected_config, dict) - else expected_value - ), - ) - ) + for section_name, section_properties in global_ini_defaults.items(): + try: + section_start = global_ini_content.index(f"[{section_name}]") + next_section_start = len(global_ini_content) + for i in range(section_start + 1, len(global_ini_content)): + if global_ini_content[i].strip().startswith("["): + next_section_start = i + break + + properties_slice = global_ini_content[section_start + 1 : next_section_start] + + global_ini_properties = { + key.strip(): val.strip() + for line in properties_slice + for key, sep, val in [line.partition("=")] + if sep and key.strip() + } + + for param_name, expected_config in section_properties.items(): + value = global_ini_properties.get(param_name, "") + expected_value = expected_config.get("value", "") + + self.log( + logging.INFO, + f"param_name: {param_name}, value: {value}, " + + f"Fexpected_value: {expected_value}", + ) + parameters.append( + self._create_parameter( + category="global_ini", + subcategory=section_name, + name=param_name, + value=value, + expected_value=expected_value, + ) + ) + except ValueError: + self.log(logging.WARNING, f"Section {section_name} not found in global.ini") except Exception as ex: self.log(logging.ERROR, f"Error parsing global.ini: {str(ex)}") diff --git a/src/roles/ha_db_hana/tasks/files/constants.yaml b/src/roles/ha_db_hana/tasks/files/constants.yaml index 39c2b391..a18cf7ef 100644 --- a/src/roles/ha_db_hana/tasks/files/constants.yaml +++ b/src/roles/ha_db_hana/tasks/files/constants.yaml @@ -801,36 +801,89 @@ OS_PARAMETERS: GLOBAL_INI: SUSE: 
SAPHanaSR: - provider: - value: "SAPHanaSR" - required: true - path: - value: ["/usr/share/SAPHanaSR", "/hana/shared/myHooks"] - required: true - execution_order: - value: "1" - required: true + ha_dr_provider_SAPHanaSR: + provider: + value: "SAPHanaSR" + required: true + path: + value: ["/usr/share/SAPHanaSR", "/hana/shared/myHooks"] + required: true + execution_order: + value: "1" + required: true + ha_dr_provider_suschksrv: + provider: + value: "susChkSrv" + required: true + path: + value: ["/usr/share/SAPHanaSR", "/hana/shared/myHooks"] + required: true + execution_order: + value: "3" + required: true + action_on_host: + value: "fence" + required: true + trace: + ha_dr_sushanasr: + required: false SAPHanaSR-angi: - provider: - value: "susHanaSR" - required: true - path: - value: ["/usr/share/SAPHanaSR-angi", "/hana/shared/myHooks"] - required: true - execution_order: - value: "1" - required: true + ha_dr_provider_sushanasr: + provider: + value: "susHanaSR" + required: true + path: + value: ["/usr/share/SAPHanaSR-angi", "/hana/shared/myHooks"] + required: true + execution_order: + value: "1" + required: true + ha_dr_provider_suschksrv: + provider: + value: "susChkSrv" + required: true + path: + value: ["/usr/share/SAPHanaSR-angi", "/hana/shared/myHooks"] + required: true + execution_order: + value: "3" + required: true + action_on_host: + value: "fence" + required: true + trace: + ha_dr_sushanasr: + required: false + ha_dr_suschksrv: + required: false REDHAT: SAPHanaSR: - provider: - value: "SAPHanaSR" - required: true - path: - value: ["/usr/share/SAPHanaSR/srHook", "/hana/shared/myHooks"] - required: true - execution_order: - value: "1" - required: true + ha_dr_provider_SAPHanaSR: + provider: + value: "SAPHanaSR" + required: true + path: + value: ["/usr/share/SAPHanaSR/srHook", "/hana/shared/myHooks"] + required: true + execution_order: + value: "1" + required: true + ha_dr_provider_suschksrv: + provider: + value: "susChkSrv" + required: true + path: + value: 
["/usr/share/SAPHanaSR", "/hana/shared/myHooks"] + required: true + execution_order: + value: "3" + required: true + action_on_host: + value: "fence" + required: true + trace: + ha_dr_sushanasr: + required: false # === Azure Load Balancer === # Azure Load Balancer configuration diff --git a/tests/modules/get_pcmk_properties_db_test.py b/tests/modules/get_pcmk_properties_db_test.py index bdd75f7e..79ae278a 100644 --- a/tests/modules/get_pcmk_properties_db_test.py +++ b/tests/modules/get_pcmk_properties_db_test.py @@ -133,15 +133,34 @@ provider = SAPHanaSR path = /usr/share/SAPHanaSR execution_order = 1 + +[ha_dr_provider_suschksrv] +provider = susChkSrv +path = /usr/share/SAPHanaSR +execution_order = 3 +action_on_host = fence + +[trace] +ha_dr_sushanasr = info """ DUMMY_GLOBAL_INI_ANGI = """[DEFAULT] dummy1 = dummy2 [ha_dr_provider_sushanasr] -provider = SAPHanaSR-angi +provider = susHanaSR path = /usr/share/SAPHanaSR-angi execution_order = 1 + +[ha_dr_provider_suschksrv] +provider = susChkSrv +path = /usr/share/SAPHanaSR-angi +execution_order = 3 +action_on_host = fence + +[trace] +ha_dr_sushanasr = info +ha_dr_suschksrv = info """ DUMMY_CONSTANTS = { @@ -213,15 +232,51 @@ "GLOBAL_INI": { "REDHAT": { "SAPHanaSR": { - "provider": {"value": "SAPHanaSR", "required": False}, - "path": {"value": "/usr/share/SAPHanaSR", "required": False}, - "execution_order": {"value": ["1", "2"], "required": False}, + "ha_dr_provider_SAPHanaSR": { + "provider": {"value": "SAPHanaSR", "required": True}, + "path": { + "value": ["/usr/share/SAPHanaSR", "/hana/shared/myHooks"], + "required": True, + }, + "execution_order": {"value": "1", "required": True}, + }, + "ha_dr_provider_suschksrv": { + "provider": {"value": "susChkSrv", "required": True}, + "path": { + "value": ["/usr/share/SAPHanaSR", "/hana/shared/myHooks"], + "required": True, + }, + "execution_order": {"value": "3", "required": True}, + "action_on_host": {"value": "fence", "required": True}, + }, + "trace": { + 
"ha_dr_sushanasr": {"required": False}, + }, } }, "SUSE": { "SAPHanaSR-angi": { - "provider": {"value": "SAPHanaSR-angi", "required": False}, - "path": {"value": "/usr/share/SAPHanaSR-angi", "required": False}, + "ha_dr_provider_sushanasr": { + "provider": {"value": "susHanaSR", "required": True}, + "path": { + "value": ["/usr/share/SAPHanaSR-angi", "/hana/shared/myHooks"], + "required": True, + }, + "execution_order": {"value": "1", "required": True}, + }, + "ha_dr_provider_suschksrv": { + "provider": {"value": "susChkSrv", "required": True}, + "path": { + "value": ["/usr/share/SAPHanaSR-angi", "/hana/shared/myHooks"], + "required": True, + }, + "execution_order": {"value": "3", "required": True}, + "action_on_host": {"value": "fence", "required": True}, + }, + "trace": { + "ha_dr_sushanasr": {"required": False}, + "ha_dr_suschksrv": {"required": False}, + }, } }, }, @@ -257,6 +312,7 @@ class MockOpen: def __init__(self, file_content): self.file_content = file_content + self.call_count = 0 def __call__(self, *args, **kwargs): return io.StringIO(self.file_content) @@ -400,8 +456,10 @@ def test_parse_global_ini_parameters_saphanasr(self, validator): params = validator._parse_global_ini_parameters() assert len(params) > 0 provider_params = [p for p in params if p["name"] == "provider"] - assert len(provider_params) == 1 - assert provider_params[0]["value"] == "SAPHanaSR" + assert len(provider_params) == 2 + provider_values = [p["value"] for p in provider_params] + assert "SAPHanaSR" in provider_values + assert "susChkSrv" in provider_values def test_parse_global_ini_parameters_angi(self, validator_angi): """ @@ -410,8 +468,10 @@ def test_parse_global_ini_parameters_angi(self, validator_angi): params = validator_angi._parse_global_ini_parameters() assert len(params) > 0 provider_params = [p for p in params if p["name"] == "provider"] - assert len(provider_params) == 1 - assert provider_params[0]["value"] == "SAPHanaSR-angi" + assert len(provider_params) == 2 + 
provider_values = [p["value"] for p in provider_params] + assert "susHanaSR" in provider_values + assert "susChkSrv" in provider_values def test_parse_global_ini_parameters_with_list_expected_value(self, validator): """ @@ -419,8 +479,9 @@ def test_parse_global_ini_parameters_with_list_expected_value(self, validator): """ params = validator._parse_global_ini_parameters() execution_params = [p for p in params if p["name"] == "execution_order"] - if execution_params: - assert execution_params[0]["status"] in [ + assert len(execution_params) == 2 + for param in execution_params: + assert param["status"] in [ TestStatus.SUCCESS.value, TestStatus.INFO.value, ] @@ -589,6 +650,32 @@ def test_global_ini_section_detection(self, validator_angi): params = validator_angi._parse_global_ini_parameters() assert isinstance(params, list) + def test_parse_global_ini_multiple_sections(self, validator): + """ + Test that multiple sections are parsed correctly from global.ini. + """ + params = validator._parse_global_ini_parameters() + assert len(params) == 8 + param_names = [p["name"] for p in params] + assert param_names.count("provider") == 2 + assert param_names.count("path") == 2 + assert param_names.count("execution_order") == 2 + assert param_names.count("action_on_host") == 1 + + def test_parse_global_ini_angi_multiple_sections(self, validator_angi): + """ + Test that multiple sections are parsed correctly for ANGI provider. + """ + params = validator_angi._parse_global_ini_parameters() + assert len(params) == 9 + param_names = [p["name"] for p in params] + assert param_names.count("provider") == 2 + assert param_names.count("path") == 2 + assert param_names.count("execution_order") == 2 + assert param_names.count("action_on_host") == 1 + assert param_names.count("ha_dr_sushanasr") == 1 + assert param_names.count("ha_dr_suschksrv") == 1 + def test_get_expected_value_methods(self, validator): """ Test inherited expected value methods. 
From 1c4e82b0fb121a80526461f653bebbc0318bfb93 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Tue, 21 Oct 2025 21:20:33 +0000 Subject: [PATCH 03/17] Fix parameter logging in HAClusterValidator and update global INI configuration keys in constants.yaml --- src/modules/get_pcmk_properties_db.py | 4 ++-- src/roles/ha_db_hana/tasks/files/constants.yaml | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/modules/get_pcmk_properties_db.py b/src/modules/get_pcmk_properties_db.py index 7e6c4cc8..26cfab0b 100644 --- a/src/modules/get_pcmk_properties_db.py +++ b/src/modules/get_pcmk_properties_db.py @@ -298,12 +298,12 @@ def _parse_global_ini_parameters(self): self.log( logging.INFO, f"param_name: {param_name}, value: {value}, " - + f"Fexpected_value: {expected_value}", + + f"expected_value: {expected_value}", ) parameters.append( self._create_parameter( category="global_ini", - subcategory=section_name, + id=section_name, name=param_name, value=value, expected_value=expected_value, diff --git a/src/roles/ha_db_hana/tasks/files/constants.yaml b/src/roles/ha_db_hana/tasks/files/constants.yaml index a18cf7ef..bc3c2141 100644 --- a/src/roles/ha_db_hana/tasks/files/constants.yaml +++ b/src/roles/ha_db_hana/tasks/files/constants.yaml @@ -882,7 +882,9 @@ GLOBAL_INI: value: "fence" required: true trace: - ha_dr_sushanasr: + ha_dr_saphanasr: + required: false + ha_dr_chksrv: required: false # === Azure Load Balancer === From 28174e85c5329140d4cc70631a1cd70b5961e70c Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 06:20:49 +0000 Subject: [PATCH 04/17] Refactor conditionals in configuration checks and update Azure Load Balancer test parameters for clarity --- src/playbook_00_configuration_checks.yml | 9 ++++----- tests/modules/get_azure_lb_test.py | 12 ++++++++---- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/playbook_00_configuration_checks.yml b/src/playbook_00_configuration_checks.yml index e48e614d..ef1ab814 100644 --- 
a/src/playbook_00_configuration_checks.yml +++ b/src/playbook_00_configuration_checks.yml @@ -137,11 +137,10 @@ file_name: "high_availability" checks_var: "db_ha_config_checks" results_var: "db_ha_config_results" - when: >- - role == 'DB' and - database_high_availability | default(false) | bool and - (configuration_test_type == 'all' or - configuration_test_type == 'Database') + when: + - role == 'DB' and + - database_high_availability | default(false) | bool + - configuration_test_type == 'all' or configuration_test_type == 'Database') - name: "Run role-specific checks for ASCS/SCS hosts" ansible.builtin.include_tasks: diff --git a/tests/modules/get_azure_lb_test.py b/tests/modules/get_azure_lb_test.py index da96acd1..e85491b4 100644 --- a/tests/modules/get_azure_lb_test.py +++ b/tests/modules/get_azure_lb_test.py @@ -84,11 +84,13 @@ def azure_lb(self, mocker): ), "constants": { "AZURE_LOADBALANCER": { - "RULES": {"idle_timeout_in_minutes": 4, "enable_floating_ip": False}, + "RULES": { + "idle_timeout_in_minutes": {"value": 4, "required": True}, + "enable_floating_ip": {"value": False, "required": True}, + }, "PROBES": { - "interval_in_seconds": 5, - "number_of_probes": 3, - "timeout_in_seconds": 4, + "interval_in_seconds": {"value": 5, "required": True}, + "number_of_probes": {"value": 3, "required": True}, }, } }, @@ -113,6 +115,8 @@ def test_get_load_balancers_details(self, azure_lb): :type azure_lb: AzureLoadBalancer """ azure_lb.get_load_balancers_details() + print(f"\n\nResult: {azure_lb.result}") + print(f"Parameters: {azure_lb.result['details']['parameters']}") assert azure_lb.result["status"] == "PASSED" assert azure_lb.result["details"]["parameters"] is not None From a96cd4eef7587318f598f67943ac9cc81f8ef4db Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 06:40:21 +0000 Subject: [PATCH 05/17] Enhance configuration checks to support IBM DB2 alongside SAP HANA, updating relevant documentation and playbook tasks for clarity and 
functionality. --- docs/CONFIGURATION_CHECKS.md | 14 +- src/modules/configuration_check_module.py | 2 +- src/playbook_00_configuration_checks.yml | 49 +- .../configuration_checks/tasks/files/db2.yml | 537 ++++++++++++++++++ src/roles/configuration_checks/tasks/main.yml | 4 +- 5 files changed, 592 insertions(+), 14 deletions(-) create mode 100644 src/roles/configuration_checks/tasks/files/db2.yml diff --git a/docs/CONFIGURATION_CHECKS.md b/docs/CONFIGURATION_CHECKS.md index 2ccc9b65..a312a48a 100644 --- a/docs/CONFIGURATION_CHECKS.md +++ b/docs/CONFIGURATION_CHECKS.md @@ -29,17 +29,17 @@ Configuration validation serves as a critical quality gate in the SAP deployment - Storage account redundancy settings - Disk caching policies -**SAP HANA Configuration** -- Memory allocation -- System replication parameters +**SAP Database Configuration** +- SAP HANA: Memory allocation, system replication parameters +- IBM DB2: Hardware requirements, system language, OS tuning parameters -**Pacemaker Cluster** +**Pacemaker Cluster (HANA only)** - Resource agent versions and parameters - Fencing (STONITH) configuration - Resource constraints and colocation rules - Cluster communication settings -**SAP HA Resources** +**SAP HA Resources (HANA only)** - Virtual hostname configuration - File system mount options - Service startup ordering @@ -56,6 +56,8 @@ Update the `TEST_TYPE` parameter in [`vars.yaml`](./../vars.yaml) file to `Confi Follow the steps (2.1 - 2.2) in [Setup Guide for SAP Testing Automation Framework](./SETUP.MD#2-system-configuration) to configure your system details. +> **Note**: High Availability (HA) configuration checks and functional tests are currently supported only for SAP HANA databases. For IBM DB2 databases, only non-HA configuration checks are available. + ### 3. 
Test Execution @@ -71,7 +73,7 @@ To execute the script, run following command: # Run checks with verbose logging ./scripts/sap_automation_qa.sh -vv -# Run only Database (HANA) configuration checks +# Run only Database configuration checks (supports both HANA and DB2) ./scripts/sap_automation_qa.sh --extra-vars='{"configuration_test_type":"Database"}' # Run only ASCS/ERS configuration checks diff --git a/src/modules/configuration_check_module.py b/src/modules/configuration_check_module.py index f6b074f8..03018b50 100644 --- a/src/modules/configuration_check_module.py +++ b/src/modules/configuration_check_module.py @@ -875,7 +875,7 @@ def run(self): context["hostname"] = custom_hostname self.set_context(context) - if self.context.get("check_type", {}).get("file_name") == "hana": + if self.context.get("check_type", {}).get("file_name") in ["hana", "db2"]: temp_context = FileSystemCollector(parent=self).collect( check=None, context=self.context ) diff --git a/src/playbook_00_configuration_checks.yml b/src/playbook_00_configuration_checks.yml index ef1ab814..e81afbad 100644 --- a/src/playbook_00_configuration_checks.yml +++ b/src/playbook_00_configuration_checks.yml @@ -106,7 +106,7 @@ loop_control: loop_var: check_type - - name: "Run role-specific checks for DB hosts" + - name: "Run role-specific checks for HANA DB hosts" ansible.builtin.include_tasks: file: "./roles/configuration_checks/tasks/main.yml" vars: @@ -117,18 +117,44 @@ results_var: "db_hana_results" when: >- role == 'DB' and + (platform | default('HANA') | upper == 'HANA') and (configuration_test_type == 'all' or configuration_test_type == 'Database') - - name: "Debug DB checks execution" + - name: "Debug HANA checks execution" ansible.builtin.debug: msg: "Executing Database (HANA) checks on host {{ inventory_hostname }}" when: >- role == 'DB' and + (platform | default('HANA') | upper == 'HANA') and (configuration_test_type == 'all' or configuration_test_type == 'Database') - - name: "Run HA configuration 
checks for DB hosts" + - name: "Run role-specific checks for DB2 DB hosts" + ansible.builtin.include_tasks: + file: "./roles/configuration_checks/tasks/main.yml" + vars: + check_type: + name: "Database (DB2) Checks" + file_name: "db2" + checks_var: "db_db2_checks" + results_var: "db_db2_results" + when: >- + role == 'DB' and + (platform | default('HANA') | upper == 'DB2') and + (configuration_test_type == 'all' or + configuration_test_type == 'Database') + + - name: "Debug DB2 checks execution" + ansible.builtin.debug: + msg: "Executing Database (DB2) checks on host {{ inventory_hostname }}" + when: >- + role == 'DB' and + (platform | default('HANA') | upper == 'DB2') and + (configuration_test_type == 'all' or + configuration_test_type == 'Database') + + - name: "Run HA configuration checks for HANA DB hosts" ansible.builtin.include_tasks: file: "./roles/configuration_checks/tasks/main.yml" vars: @@ -138,9 +164,10 @@ checks_var: "db_ha_config_checks" results_var: "db_ha_config_results" when: - - role == 'DB' and + - role == 'DB' + - (platform | default('HANA') | upper == 'HANA') - database_high_availability | default(false) | bool - - configuration_test_type == 'all' or configuration_test_type == 'Database') + - (configuration_test_type == 'all' or configuration_test_type == 'Database') - name: "Run role-specific checks for ASCS/SCS hosts" ansible.builtin.include_tasks: @@ -297,6 +324,18 @@ loop: "{{ groups[sap_sid | upper + '_DB']|default([]) }}" when: hostvars[item].db_hana_results is defined + - name: "Collect DB (DB2) check results" + ansible.builtin.set_fact: + all_results: "{{ all_results + hostvars[item].db_db2_results + | default([]) }}" + execution_metadata: "{{ execution_metadata + [ + {'host': item, + 'check_type': 'db_db2', + 'metadata': hostvars[item].db_db2_results_metadata + | default({})}] }}" + loop: "{{ groups[sap_sid | upper + '_DB']|default([]) }}" + when: hostvars[item].db_db2_results is defined + - name: "Collect DB HA configuration check 
results" ansible.builtin.set_fact: all_results: "{{ all_results + hostvars[item].db_ha_config_results diff --git a/src/roles/configuration_checks/tasks/files/db2.yml b/src/roles/configuration_checks/tasks/files/db2.yml new file mode 100644 index 00000000..adbe6fd6 --- /dev/null +++ b/src/roles/configuration_checks/tasks/files/db2.yml @@ -0,0 +1,537 @@ +enums: + severity: + - info: &info "INFO" + - high: &high "HIGH" + - low: &low "LOW" + - warning: &warning "WARNING" + - critical: &critical "CRITICAL" + - all_severity: &severity [*info, *high, *low, *warning, *critical] + + os_type: + - suse: &suse "SLES_SAP" + - redhat: &redhat "REDHAT" + - oraclelinux: &oraclelinux "OracleLinux" + - windows: &windows "Windows" + - all_os: &os_type [*suse, *redhat, *oraclelinux, *windows] + + os_version: + - suse_12_3: &suse_12_3 "SUSE 12 SP3" + - suse_12_4: &suse_12_4 "SUSE 12 SP4" + - suse_12_5: &suse_12_5 "SUSE 12 SP5" + - suse_15_0: &suse_15_0 "SUSE 15 SP0" + - suse_15_0: &suse_15_1 "SUSE 15 SP1" + - all_versions: &all_versions "all" + + hardware_type: + - vm: &vm "VM" + - hli: &hli "HLI" + - all_hardware: &all_hardware [*vm, *hli] + + storage_type: + premium_storage: &premium_storage ["Premium_LRS","UltraSSD_LRS","PremiumV2_LRS","AFS"] + anf: &anf ["ANF"] + all_storage: &all_storage ["Premium_LRS","UltraSSD_LRS","StandardSSD_LRS","Standard_LRS","ANF","PremiumV2_LRS","AFS"] + + workload: + - sap: &sap "SAP" + - all_workload: &workload [*sap] + + db: + - hana: &hana "HANA" + - mssql: &mssql "MSSQL" + - oracle: &oracle "Oracle" + - db2: &db2 "Db2" + - ase: &ase "ASE" + - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] + + role: + - db: &db_role "DB" + - ascs: &ascs_role "SCS" + - ers: &ers_role "ERS" + - app: &app_role "APP" + - webdispatcher: &web_dispatch "WEB" + - pas: &pas "PAS" + - all_role: &role [*db_role, *ascs_role, *ers_role, *app_role, *web_dispatch, *pas] + + cluster_type: + - sbd: &sbd "ISCSI" + - fencing_agent: &fencing_agent "AFA" + - all_fencing_agent: 
&cluster_type [*sbd, *fencing_agent] + + collector_type: + - command: &command "command" + - azure: &azure "azure" + - all_collector_type: &collector_type [*command, *azure] + + category: + - package: &package_check "Package" + - vm: &vm_check "Virtual Machine" + - sap: &sap_check "SAP" + - os: &os_check "Operating System" + - all_check_types: &category [*package_check, *vm_check, *sap_check, *os_check] + + user: + - root: &root "root" + - sidadm: &sidadm "sidadm" + - all_users: &user [*root, *sidadm] + + validator_type: + - string: &string "string" + - range: &range "range" + - list: &list "list" + - all: &validator_type [*string, *range, *list] + + report: + - check: &check "check" + - section: §ion "section" + - table: &table "table" + - report: &report [*check, *section, *table] + +# Checks for DB roles for database_type == hana + +checks: + - id: "DB-DB2-0001" + name: "DB2 Hardware Check" + description: "Check if the hardware type is supported for DB2" + category: *sap_check + severity: *high + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "lscpu | grep -E 'Architecture:|Vendor ID:|Model name:' | grep 'x86_64' | awk '{print $2}'" + user: *root + validator_type: *string + validator_args: + valid_list: ["x86_64", "x64"] + report: *check + + - id: "DB-DB2-0002" + name: "Linux installation & system language" + description: "Check if the Linux installation and system language are supported for DB2" + category: *sap_check + severity: *high + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "echo $LANG" + user: *root + validator_type: *string + validator_args: + expected_output: "en_US.UTF-8" + 
report: *check + + - id: "DB-DB2-0003" + name: "SELinux settings" + description: "SAP recommended SELinux in permissive mode or disabled" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "grep '^SELINUX=' /etc/selinux/config | awk -F= '{print $2}'" + user: *root + validator_type: *string + validator_args: + valid_list: ["permissive", "disabled"] + report: *check + + - id: "DB-DB2-0004" + name: "vm.max_map_count setting" + description: "vm.max_map_count setting" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "[ $(/sbin/sysctl vm.max_map_count -n) -eq $(($(free -b | grep Mem: | awk '{print $2}') / 4096)) ] && echo OK || echo $(/sbin/sysctl vm.max_map_count -n)" + user: *root + validator_type: *string + validator_args: + expected_output: "OK" + report: *check + + - id: "DB-DB2-0005" + name: "VM Swappiness setting" + description: "vm.swappiness setting" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "/sbin/sysctl vm.swappiness -n" + user: *root + validator_type: *string + validator_args: + expected_output: "5" + report: *check + + - id: "DB-DB2-0006" + name: "VM Overcommit recovery setting" + description: "vm.overcommit_memory setting" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: 
*all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "/sbin/sysctl vm.overcommit_memory -n" + user: *root + validator_type: *string + validator_args: + expected_output: "0" + report: *check + + - id: "DB-DB2-0007" + name: "Randomize VA Space setting" + description: "Randomize VA Space" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "/sbin/sysctl kernel.randomize_va_space -n" + user: *root + validator_type: *string + validator_args: + expected_output: "2" + report: *check + + - id: "DB-DB2-0008" + name: "Max Async I/O" + description: "Max Async I/O setting" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "/sbin/sysctl fs.aio-max-nr -n" + user: *root + validator_type: *string + validator_args: + expected_output: "1048576" + report: *check + references: + sap: "2936683" + + - id: "DB-DB2-0009" + name: "kernel.sem" + description: "kernel.sem" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "/sbin/sysctl kernel.sem -n" + user: *root + validator_type: *string + validator_args: + expected_output: "32000 1024000000 500 32000" + report: *check + references: + sap: "2936683" + + - id: "DB-DB2-0010" + name: "Kernel out of process" + description: "Parameter that controls the
system's behavior in response to a kernel out of process [OOPS]." + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "/sbin/sysctl kernel.panic_on_oops -n" + user: *root + validator_type: *string + validator_args: + expected_output: "1" + report: *check + references: + microsoft: "https://www.ibm.com/docs/en/db2/11.1?topic=unix-kernel-parameter-requirements-linux" + + - id: "DB-DB2-0011" + name: "Max File Handles" + description: "Parameter sets the maximum number of file handles (file descriptors) that the kernel will allocate." + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "/sbin/sysctl fs.file-max -n" + user: *root + validator_type: *string + validator_args: + expected_output: "16384" + report: *check + references: + microsoft: "https://www.ibm.com/docs/en/db2/11.1?topic=unix-kernel-parameter-requirements-linux" + + - id: "DB-DB2-0012" + name: "Transparent Huge Pages" + description: "Transparent Huge Pages (THP) is a Linux kernel feature related to memory management." 
+ category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: *all_role + database_type: [*db2] + collector_type: *command + collector_args: + command: "cat /sys/kernel/mm/transparent_hugepage/enabled" + user: *root + validator_type: *string + validator_args: + expected_output: "always madvise [never]" + report: *check + references: + sap: "2391706" + microsoft: "https://www.ibm.com/docs/en/db2/11.1?topic=unix-kernel-parameter-requirements-linux" + + - id: "DB-DB2-0013" + name: "HADR TIMEOUT" + description: "HADR TIMEOUT" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: [*premium_storage] + role: [*db_role] + database_type: [*db2] + collector_type: *command + collector_args: + command: "su - $(getent passwd | cut -d ':' -f1 | grep db2) -c 'db2pd -alldbs -hadr' | grep -i 'HADR_TIMEOUT' | awk '{print $NF}'" + user: *root + validator_type: *string + validator_args: + expected_output: "45" + report: *check + references: + microsoft: "https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-rhel-ibm-db2-luw?tabs=lb-portal#ibm-db2-hadr-settings-for-azure" + + - id: "DB-DB2-0014" + name: "HADR TIMEOUT" + description: "HADR TIMEOUT" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse] + os_version: *all_versions + hardware_type: *vm + storage_type: [*premium_storage] + role: [*db_role] + database_type: [*db2] + collector_type: *command + collector_args: + command: "su - $(getent passwd | cut -d ':' -f1 | grep db2) -c 'db2pd -alldbs -hadr' | grep -i 'HADR_TIMEOUT' | awk '{print $NF}'" + user: *root + validator_type: *string + validator_args: + expected_output: "60" + report: *check + references: + microsoft: 
"https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-rhel-ibm-db2-luw?tabs=lb-portal#ibm-db2-hadr-settings-for-azure" + + - id: "DB-DB2-0015" + name: "PEER WINDOW (seconds)" + description: "PEER WINDOW (seconds) RedHat" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: [*premium_storage] + role: [*db_role] + database_type: [*db2] + collector_type: *command + collector_args: + command: "$(getent passwd | cut -d ':' -f1 | grep db2) -c 'db2pd -alldbs -hadr' | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" + user: *root + validator_type: *string + validator_args: + expected_output: "240" + report: *check + references: + microsoft: "https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-rhel-ibm-db2-luw?tabs=lb-portal#ibm-db2-hadr-settings-for-azure" + + - id: "DB-DB2-0016" + name: "PEER WINDOW (seconds)" + description: "PEER WINDOW (seconds) SUSE SBD" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse] + os_version: *all_versions + hardware_type: *vm + storage_type: [*premium_storage] + role: [*db_role] + database_type: [*db2] + high_availability_agent: *sbd + collector_type: *command + collector_args: + command: "$(getent passwd | cut -d ':' -f1 | grep db2) -c 'db2pd -alldbs -hadr' | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" + user: *root + validator_type: *string + validator_args: + expected_output: "300" + report: *check + references: + microsoft: "https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-rhel-ibm-db2-luw?tabs=lb-portal#ibm-db2-hadr-settings-for-azure" + + - id: "DB-DB2-0017" + name: "PEER WINDOW (seconds)" + description: "PEER WINDOW (seconds) SUSE Fencing Agent" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse] + os_version: *all_versions + hardware_type: *vm + 
storage_type: [*premium_storage] + role: [*db_role] + database_type: [*db2] + high_availability_agent: *fencing_agent + collector_type: *command + collector_args: + command: "su - $(getent passwd | cut -d ':' -f1 | grep db2) -c 'db2pd -alldbs -hadr' | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" + user: *root + validator_type: *string + validator_args: + expected_output: "900" + report: *check + references: + microsoft: "https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-rhel-ibm-db2-luw?tabs=lb-portal#ibm-db2-hadr-settings-for-azure" + + - id: "DB-DB2-0018" + name: "Maximum shared memory segments" + description: "Maximum shared memory segments" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: [*db_role] + database_type: [*db2] + collector_type: *command + collector_args: + command: "[ $((256 * $(free -g | grep Mem: | awk '{print $2}'))) -ge $(/sbin/sysctl kernel.shmmni -n) ] && echo OK || echo $(/sbin/sysctl kernel.shmmni -n)" + user: *root + validator_type: *string + validator_args: + expected_output: "OK" + report: *check + references: + microsoft: "https://www.ibm.com/docs/en/db2/11.1?topic=unix-kernel-parameter-requirements-linux" + sap: "2751102" + + - id: "DB-DB2-0019" + name: "Instance Memory size" + description: "Instance Memory size" + category: *sap_check + severity: *warning + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: [*premium_storage] + role: [*db_role] + database_type: [*db2] + collector_type: *command + collector_args: + command: "output=$(su - $(getent passwd | grep db2 | cut -d : -f1) -c 'db2pd -dbmcfg' | grep INSTANCE_MEMORY | awk '{print $NF}'); [[ $output -le 100 ]] && echo $output || echo $(awk -v output=$output 'BEGIN {print output * 4096 / 1024 / 1024 / 1024}')" + user: *root + 
validator_type: *range + validator_args: + min: 64 + report: *check + references: + sap: "2751102" diff --git a/src/roles/configuration_checks/tasks/main.yml b/src/roles/configuration_checks/tasks/main.yml index 79c5a7d9..21543ea3 100644 --- a/src/roles/configuration_checks/tasks/main.yml +++ b/src/roles/configuration_checks/tasks/main.yml @@ -39,9 +39,9 @@ execution_warnings: "{{ execution_warnings | default([]) }}" when: compute_metadata is not failed -- name: "{{ check_type.name }} - Include disks task when HANA checks to be run" +- name: "{{ check_type.name }} - Include disks task when HANA or DB2 checks to be run" ansible.builtin.include_tasks: disks.yml - when: check_type.file_name == "hana" + when: check_type.file_name in ["hana", "db2"] - name: "{{ check_type.name }} - Execute HA and Load Balancer module when high_availability checks to be run" ansible.builtin.include_tasks: ha_modules.yml From 025b38e4b6b399a87df82feee8650740b5310a58 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 08:18:05 +0000 Subject: [PATCH 06/17] Standardize DB2 naming across configuration files to ensure consistency in enums and supported OS/DB combinations. 
--- src/roles/configuration_checks/tasks/files/app.yml | 2 +- src/roles/configuration_checks/tasks/files/ascs.yml | 2 +- src/roles/configuration_checks/tasks/files/db2.yml | 6 +++--- src/roles/configuration_checks/tasks/files/hana.yml | 2 +- src/roles/configuration_checks/tasks/files/network.yml | 2 +- src/roles/configuration_checks/tasks/files/package.yml | 2 +- src/roles/configuration_checks/tasks/files/sap.yml | 2 +- .../configuration_checks/tasks/files/virtual_machine.yml | 2 +- src/roles/configuration_checks/vars/vm-support.yml | 8 ++++---- 9 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/roles/configuration_checks/tasks/files/app.yml b/src/roles/configuration_checks/tasks/files/app.yml index b0fe7d77..25ea8342 100644 --- a/src/roles/configuration_checks/tasks/files/app.yml +++ b/src/roles/configuration_checks/tasks/files/app.yml @@ -30,7 +30,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "Db2" + - db2: &db2 "DB2" - ase: &ase "ASE" - all_db: &all_db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/ascs.yml b/src/roles/configuration_checks/tasks/files/ascs.yml index 680b345a..2b96ccc8 100644 --- a/src/roles/configuration_checks/tasks/files/ascs.yml +++ b/src/roles/configuration_checks/tasks/files/ascs.yml @@ -38,7 +38,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "Db2" + - db2: &db2 "DB2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/db2.yml b/src/roles/configuration_checks/tasks/files/db2.yml index adbe6fd6..4a6bb9fe 100644 --- a/src/roles/configuration_checks/tasks/files/db2.yml +++ b/src/roles/configuration_checks/tasks/files/db2.yml @@ -19,7 +19,7 @@ enums: - suse_12_4: &suse_12_4 "SUSE 12 SP4" - suse_12_5: &suse_12_5 "SUSE 12 SP5" - suse_15_0: &suse_15_0 "SUSE 15 SP0" - - suse_15_0: &suse_15_1 "SUSE 15 SP1" + - 
suse_15_1: &suse_15_1 "SUSE 15 SP1" - all_versions: &all_versions "all" hardware_type: @@ -40,7 +40,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "Db2" + - db2: &db2 "DB2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] @@ -51,7 +51,7 @@ enums: - app: &app_role "APP" - webdispatcher: &web_dispatch "WEB" - pas: &pas "PAS" - - all_role: &role [*db_role, *ascs_role, *ers_role, *app_role, *web_dispatch, *pas] + - all_role: &all_role [*db_role, *ascs_role, *ers_role, *app_role, *web_dispatch, *pas] cluster_type: - sbd: &sbd "ISCSI" diff --git a/src/roles/configuration_checks/tasks/files/hana.yml b/src/roles/configuration_checks/tasks/files/hana.yml index db8ac2c4..d75803b6 100644 --- a/src/roles/configuration_checks/tasks/files/hana.yml +++ b/src/roles/configuration_checks/tasks/files/hana.yml @@ -40,7 +40,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "Db2" + - db2: &db2 "DB2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/network.yml b/src/roles/configuration_checks/tasks/files/network.yml index bc741179..8d94dadb 100644 --- a/src/roles/configuration_checks/tasks/files/network.yml +++ b/src/roles/configuration_checks/tasks/files/network.yml @@ -40,7 +40,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "Db2" + - db2: &db2 "DB2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/package.yml b/src/roles/configuration_checks/tasks/files/package.yml index 56dc7cd7..24ba0be3 100644 --- a/src/roles/configuration_checks/tasks/files/package.yml +++ b/src/roles/configuration_checks/tasks/files/package.yml @@ -39,7 +39,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "Db2" + - db2: &db2 "DB2" - ase: &ase "ASE" - 
db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/sap.yml b/src/roles/configuration_checks/tasks/files/sap.yml index 9350686b..7303af88 100644 --- a/src/roles/configuration_checks/tasks/files/sap.yml +++ b/src/roles/configuration_checks/tasks/files/sap.yml @@ -39,7 +39,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "Db2" + - db2: &db2 "DB2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/virtual_machine.yml b/src/roles/configuration_checks/tasks/files/virtual_machine.yml index d381ab1a..f47aae8f 100644 --- a/src/roles/configuration_checks/tasks/files/virtual_machine.yml +++ b/src/roles/configuration_checks/tasks/files/virtual_machine.yml @@ -39,7 +39,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "Db2" + - db2: &db2 "DB2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/vars/vm-support.yml b/src/roles/configuration_checks/vars/vm-support.yml index fa12a719..259ceee1 100644 --- a/src/roles/configuration_checks/vars/vm-support.yml +++ b/src/roles/configuration_checks/vars/vm-support.yml @@ -26,7 +26,7 @@ SupportedOSDBCombinations: ERS: [*suse, *redhat, *oraclelinux, *windows] APP: [*windows, *oraclelinux] PAS: [*windows, *oraclelinux] - Db2: + DB2: DB: [*windows, *redhat, *suse] SCS: [*windows, *redhat, *suse] ERS: [*windows, *redhat, *suse] @@ -42,9 +42,9 @@ SupportedOSDBCombinations: # Database configurations db_types: - non_hana_dbs: &db_type_non_hana ["MSSQL", "Oracle", "Db2", "ASE"] - non_hana_dbs_ase: &db_type_non_hana_ase ["MSSQL", "Oracle", "Db2"] - all_dbs: &db_type_all ["MSSQL", "Oracle", "Db2", "HANA", "ASE"] + non_hana_dbs: &db_type_non_hana ["MSSQL", "Oracle", "DB2", "ASE"] + non_hana_dbs_ase: &db_type_non_hana_ase ["MSSQL", "Oracle", "DB2"] + all_dbs: &db_type_all 
["MSSQL", "Oracle", "DB2", "HANA", "ASE"] # Storage configurations storage_types: From c6c46e1bf5b0603cab421d184cfe3d7ae5972071 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 08:27:47 +0000 Subject: [PATCH 07/17] Standardize naming for DB2 to Db2 across configuration files for consistency and clarity. --- src/playbook_00_configuration_checks.yml | 12 ++--- .../configuration_checks/tasks/files/app.yml | 2 +- .../configuration_checks/tasks/files/ascs.yml | 2 +- .../configuration_checks/tasks/files/db2.yml | 46 +++++++++---------- .../configuration_checks/tasks/files/hana.yml | 2 +- .../tasks/files/network.yml | 2 +- .../tasks/files/package.yml | 2 +- .../configuration_checks/tasks/files/sap.yml | 2 +- .../tasks/files/virtual_machine.yml | 2 +- src/roles/configuration_checks/tasks/main.yml | 2 +- .../configuration_checks/vars/vm-support.yml | 14 +++--- 11 files changed, 44 insertions(+), 44 deletions(-) diff --git a/src/playbook_00_configuration_checks.yml b/src/playbook_00_configuration_checks.yml index e81afbad..0d56147a 100644 --- a/src/playbook_00_configuration_checks.yml +++ b/src/playbook_00_configuration_checks.yml @@ -130,24 +130,24 @@ (configuration_test_type == 'all' or configuration_test_type == 'Database') - - name: "Run role-specific checks for DB2 DB hosts" + - name: "Run role-specific checks for Db2 DB hosts" ansible.builtin.include_tasks: file: "./roles/configuration_checks/tasks/main.yml" vars: check_type: - name: "Database (DB2) Checks" + name: "Database (Db2) Checks" file_name: "db2" checks_var: "db_db2_checks" results_var: "db_db2_results" when: >- role == 'DB' and - (platform | default('HANA') | upper == 'DB2') and + (platform | default('HANA') | upper == 'Db2') and (configuration_test_type == 'all' or configuration_test_type == 'Database') - - name: "Debug DB2 checks execution" + - name: "Debug Db2 checks execution" ansible.builtin.debug: - msg: "Executing Database (DB2) checks on host {{ inventory_hostname }}" + msg: 
"Executing Database (Db2) checks on host {{ inventory_hostname }}" when: >- role == 'DB' and (platform | default('HANA') | upper == 'DB2') and @@ -324,7 +324,7 @@ loop: "{{ groups[sap_sid | upper + '_DB']|default([]) }}" when: hostvars[item].db_hana_results is defined - - name: "Collect DB (DB2) check results" + - name: "Collect DB (Db2) check results" ansible.builtin.set_fact: all_results: "{{ all_results + hostvars[item].db_db2_results | default([]) }}" diff --git a/src/roles/configuration_checks/tasks/files/app.yml b/src/roles/configuration_checks/tasks/files/app.yml index 25ea8342..b0fe7d77 100644 --- a/src/roles/configuration_checks/tasks/files/app.yml +++ b/src/roles/configuration_checks/tasks/files/app.yml @@ -30,7 +30,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "DB2" + - db2: &db2 "Db2" - ase: &ase "ASE" - all_db: &all_db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/ascs.yml b/src/roles/configuration_checks/tasks/files/ascs.yml index 2b96ccc8..680b345a 100644 --- a/src/roles/configuration_checks/tasks/files/ascs.yml +++ b/src/roles/configuration_checks/tasks/files/ascs.yml @@ -38,7 +38,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "DB2" + - db2: &db2 "Db2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/db2.yml b/src/roles/configuration_checks/tasks/files/db2.yml index 4a6bb9fe..75f8f832 100644 --- a/src/roles/configuration_checks/tasks/files/db2.yml +++ b/src/roles/configuration_checks/tasks/files/db2.yml @@ -40,7 +40,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "DB2" + - db2: &db2 "Db2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] @@ -90,9 +90,9 @@ enums: # Checks for DB roles for database_type == hana checks: - - id: "DB-DB2-0001" - name: "DB2 Hardware 
Check" - description: "Check if the hardware type is supported for DB2" + - id: "DB-Db2-0001" + name: "Db2 Hardware Check" + description: "Check if the hardware type is supported for Db2" category: *sap_check severity: *high workload: *sap @@ -112,9 +112,9 @@ checks: valid_list: ["x86_64", "x64"] report: *check - - id: "DB-DB2-0002" + - id: "DB-Db2-0002" name: "Linux installation & system language" - description: "Check if the Linux installation and system language are supported for DB2" + description: "Check if the Linux installation and system language are supported for Db2" category: *sap_check severity: *high workload: *sap @@ -134,7 +134,7 @@ checks: expected_output: "en_US.UTF-8" report: *check - - id: "DB-DB2-0003" + - id: "DB-Db2-0003" name: "SELinux settings" description: "SAP recommended SELinux in permissive mode or disabled" category: *sap_check @@ -156,7 +156,7 @@ checks: expected_output: "en_US.UTF-8" report: *check - - id: "DB-DB2-0004" + - id: "DB-Db2-0004" name: "vm.max_map_count setting" description: "vm.max_map_count setting" category: *sap_check @@ -178,7 +178,7 @@ checks: expected_output: "OK" report: *check - - id: "DB-DB2-0005" + - id: "DB-Db2-0005" name: "VM Swappiness setting" description: "vm.swappiness setting" category: *sap_check @@ -200,7 +200,7 @@ checks: expected_output: "32000 1024000000 500 32000" report: *check - - id: "DB-DB2-0006" + - id: "DB-Db2-0006" name: "VM Overcommit recovery setting" description: "vm.overcommit_memory setting" category: *sap_check @@ -222,7 +222,7 @@ checks: expected_output: "0" report: *check - - id: "DB-DB2-0007" + - id: "DB-Db2-0007" name: "Randomize VA Space setting" description: "Randomize VA Space" category: *sap_check @@ -244,7 +244,7 @@ checks: expected_output: "5" report: *check - - id: "DB-DB2-0008" + - id: "DB-Db2-0008" name: "Max Async I/O" description: "Max Async I/O setting" category: *sap_check @@ -268,7 +268,7 @@ checks: references: sap: "2936683" - - id: "DB-DB2-0009" + - id: 
"DB-Db2-0009" name: "kernel.sem" description: "kernel.sem" category: *sap_check @@ -292,7 +292,7 @@ checks: references: sap: "2936683" - - id: "DB-DB2-0010" + - id: "DB-Db2-0010" name: "Kernel out of process" description: "Parameter that controls the system's behavior in response to a kernel out of process [OOPS]." category: *sap_check @@ -316,7 +316,7 @@ checks: references: microsoft: "https://www.ibm.com/docs/en/db2/11.1?topic=unix-kernel-parameter-requirements-linux" - - id: "DB-DB2-0011" + - id: "DB-Db2-0011" name: "Max File Handles" description: "Parameter sets the maximum number of file handles (file descriptors) that the kernel will allocate." category: *sap_check @@ -340,7 +340,7 @@ checks: references: microsoft: "https://www.ibm.com/docs/en/db2/11.1?topic=unix-kernel-parameter-requirements-linux" - - id: "DB-DB2-0012" + - id: "DB-Db2-0012" name: "Transparent Huge Pages" description: "Transparent Huge Pages (THP) is a Linux kernel feature related to memory management." category: *sap_check @@ -365,7 +365,7 @@ checks: sap: "2391706" microsoft: "https://www.ibm.com/docs/en/db2/11.1?topic=unix-kernel-parameter-requirements-linux" - - id: "DB-DB2-0013" + - id: "DB-Db2-0013" name: "HADR TIMEOUT" description: "HADR TIMEOUT" category: *sap_check @@ -389,7 +389,7 @@ checks: references: microsoft: "https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-rhel-ibm-db2-luw?tabs=lb-portal#ibm-db2-hadr-settings-for-azure" - - id: "DB-DB2-0014" + - id: "DB-Db2-0014" name: "HADR TIMEOUT" description: "HADR TIMEOUT" category: *sap_check @@ -413,7 +413,7 @@ checks: references: microsoft: "https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-rhel-ibm-db2-luw?tabs=lb-portal#ibm-db2-hadr-settings-for-azure" - - id: "DB-DB2-0015" + - id: "DB-Db2-0015" name: "PEER WINDOW (seconds)" description: "PEER WINDOW (seconds) RedHat" category: *sap_check @@ -437,7 +437,7 @@ checks: references: microsoft: 
"https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-rhel-ibm-db2-luw?tabs=lb-portal#ibm-db2-hadr-settings-for-azure" - - id: "DB-DB2-0016" + - id: "DB-Db2-0016" name: "PEER WINDOW (seconds)" description: "PEER WINDOW (seconds) SUSE SBD" category: *sap_check @@ -462,7 +462,7 @@ checks: references: microsoft: "https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-rhel-ibm-db2-luw?tabs=lb-portal#ibm-db2-hadr-settings-for-azure" - - id: "DB-DB2-0017" + - id: "DB-Db2-0017" name: "PEER WINDOW (seconds)" description: "PEER WINDOW (seconds) SUSE Fencing Agent" category: *sap_check @@ -487,7 +487,7 @@ checks: references: microsoft: "https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-rhel-ibm-db2-luw?tabs=lb-portal#ibm-db2-hadr-settings-for-azure" - - id: "DB-DB2-0018" + - id: "DB-Db2-0018" name: "Maximum shared memory segments" description: "Maximum shared memory segments" category: *sap_check @@ -512,7 +512,7 @@ checks: microsoft: "https://www.ibm.com/docs/en/db2/11.1?topic=unix-kernel-parameter-requirements-linux" sap: "2751102" - - id: "DB-DB2-0019" + - id: "DB-Db2-0019" name: "Instance Memory size" description: "Instance Memory size" category: *sap_check diff --git a/src/roles/configuration_checks/tasks/files/hana.yml b/src/roles/configuration_checks/tasks/files/hana.yml index d75803b6..db8ac2c4 100644 --- a/src/roles/configuration_checks/tasks/files/hana.yml +++ b/src/roles/configuration_checks/tasks/files/hana.yml @@ -40,7 +40,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "DB2" + - db2: &db2 "Db2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/network.yml b/src/roles/configuration_checks/tasks/files/network.yml index 8d94dadb..bc741179 100644 --- a/src/roles/configuration_checks/tasks/files/network.yml +++ b/src/roles/configuration_checks/tasks/files/network.yml 
@@ -40,7 +40,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "DB2" + - db2: &db2 "Db2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/package.yml b/src/roles/configuration_checks/tasks/files/package.yml index 24ba0be3..56dc7cd7 100644 --- a/src/roles/configuration_checks/tasks/files/package.yml +++ b/src/roles/configuration_checks/tasks/files/package.yml @@ -39,7 +39,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "DB2" + - db2: &db2 "Db2" - ase: &ase "ASE" - db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/sap.yml b/src/roles/configuration_checks/tasks/files/sap.yml index 7303af88..9350686b 100644 --- a/src/roles/configuration_checks/tasks/files/sap.yml +++ b/src/roles/configuration_checks/tasks/files/sap.yml @@ -39,7 +39,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "DB2" + - db2: &db2 "Db2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/files/virtual_machine.yml b/src/roles/configuration_checks/tasks/files/virtual_machine.yml index f47aae8f..d381ab1a 100644 --- a/src/roles/configuration_checks/tasks/files/virtual_machine.yml +++ b/src/roles/configuration_checks/tasks/files/virtual_machine.yml @@ -39,7 +39,7 @@ enums: - hana: &hana "HANA" - mssql: &mssql "MSSQL" - oracle: &oracle "Oracle" - - db2: &db2 "DB2" + - db2: &db2 "Db2" - ase: &ase "ASE" - all_db: &db [*hana, *mssql, *oracle, *db2, *ase] diff --git a/src/roles/configuration_checks/tasks/main.yml b/src/roles/configuration_checks/tasks/main.yml index 21543ea3..d7f57c20 100644 --- a/src/roles/configuration_checks/tasks/main.yml +++ b/src/roles/configuration_checks/tasks/main.yml @@ -39,7 +39,7 @@ execution_warnings: "{{ execution_warnings | default([]) }}" 
when: compute_metadata is not failed -- name: "{{ check_type.name }} - Include disks task when HANA or DB2 checks to be run" +- name: "{{ check_type.name }} - Include disks task when HANA or Db2 checks to be run" ansible.builtin.include_tasks: disks.yml when: check_type.file_name in ["hana", "db2"] diff --git a/src/roles/configuration_checks/vars/vm-support.yml b/src/roles/configuration_checks/vars/vm-support.yml index 259ceee1..bc039ab2 100644 --- a/src/roles/configuration_checks/vars/vm-support.yml +++ b/src/roles/configuration_checks/vars/vm-support.yml @@ -26,7 +26,7 @@ SupportedOSDBCombinations: ERS: [*suse, *redhat, *oraclelinux, *windows] APP: [*windows, *oraclelinux] PAS: [*windows, *oraclelinux] - DB2: + Db2: DB: [*windows, *redhat, *suse] SCS: [*windows, *redhat, *suse] ERS: [*windows, *redhat, *suse] @@ -42,9 +42,9 @@ SupportedOSDBCombinations: # Database configurations db_types: - non_hana_dbs: &db_type_non_hana ["MSSQL", "Oracle", "DB2", "ASE"] - non_hana_dbs_ase: &db_type_non_hana_ase ["MSSQL", "Oracle", "DB2"] - all_dbs: &db_type_all ["MSSQL", "Oracle", "DB2", "HANA", "ASE"] + non_hana_dbs: &db_type_non_hana ["MSSQL", "Oracle", "Db2", "ASE"] + non_hana_dbs_ase: &db_type_non_hana_ase ["MSSQL", "Oracle", "Db2"] + all_dbs: &db_type_all ["MSSQL", "Oracle", "Db2", "HANA", "ASE"] # Storage configurations storage_types: @@ -69,10 +69,10 @@ hana_storage_requirements: HANADataStripeSize: "256.00k" HANALOGStripeSize: "64.00k" -# DB2 Storage Requirements +# Db2 Storage Requirements db2_storage_requirements: - DB2DataStripeSize: "256.00k" - DB2LOGStripeSize: "64.00k" + Db2DataStripeSize: "256.00k" + Db2LOGStripeSize: "64.00k" # VM deployment patterns vm_patterns: From de7b627a5f21ff182cde263acd90384fbed89b4b Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 22:28:06 +0000 Subject: [PATCH 08/17] Enhance Azure Load Balancer module to handle various IP address formats and improve validation checks; add tests for missing and nested properties. 
--- src/module_utils/collector.py | 4 + src/modules/get_azure_lb.py | 49 ++++- src/playbook_00_configuration_checks.yml | 2 +- .../configuration_checks/tasks/files/db2.yml | 63 ++++--- src/templates/config_checks_report.html | 4 +- tests/modules/get_azure_lb_test.py | 176 ++++++++++++++++++ 6 files changed, 258 insertions(+), 40 deletions(-) diff --git a/src/module_utils/collector.py b/src/module_utils/collector.py index 6f707027..43b09507 100644 --- a/src/module_utils/collector.py +++ b/src/module_utils/collector.py @@ -128,6 +128,10 @@ def collect(self, check, context) -> str: if not re.match(r"^[a-zA-Z0-9_-]+$", user): self.parent.log(logging.ERROR, f"Invalid user parameter: {user}") return f"ERROR: Invalid user parameter: {user}" + + if user == "db2sid": + user = f"db2{context.get('database_sid', '')}" + command = f"sudo -u {shlex.quote(user)} {command}" return self.parent.execute_command_subprocess( diff --git a/src/modules/get_azure_lb.py b/src/modules/get_azure_lb.py index ebff1dae..966f241b 100644 --- a/src/modules/get_azure_lb.py +++ b/src/modules/get_azure_lb.py @@ -233,31 +233,68 @@ def get_load_balancers_details(self) -> None: for inbound_rule in inbound_rules if "privateIpAddress" in inbound_rule ) + + self.log(logging.INFO, f"Looking for load balancers with IPs: {load_balancer_ips}") + found_load_balancer = None + def get_private_ip_from_config(config): + """ + Extract private IP from frontend config, handling different key variations. + Azure SDK might return different structures based on authentication context. 
+ """ + private_ip = ( + config.get("private_ip_address") + or config.get("privateIpAddress") + or config.get("properties", {}).get("private_ip_address") + or config.get("properties", {}).get("privateIpAddress") + ) + return private_ip + found_load_balancer = next( ( lb for lb in load_balancers - for frontend_ip_config in lb["frontend_ip_configurations"] - if frontend_ip_config["private_ip_address"] in load_balancer_ips + for frontend_ip_config in lb.get("frontend_ip_configurations", []) + if get_private_ip_from_config(frontend_ip_config) in load_balancer_ips ), None, ) + + if not found_load_balancer and load_balancers: + available_ips = [] + self.log( + logging.WARNING, f"No matching load balancer found for IPs: {load_balancer_ips}" + ) + for lb in load_balancers: + lb_name = lb.get("name", "unknown") + for config in lb.get("frontend_ip_configurations", []): + private_ip = get_private_ip_from_config(config) + if private_ip: + available_ips.append(f"{lb_name}:{private_ip}") + else: + self.log( + logging.DEBUG, + f"Frontend config structure for {lb_name}: {list(config.keys())}", + ) + self.log(logging.WARNING, f"Available load balancers and private IPs: {available_ips}") parameters = [] def check_parameters(entity, parameters_dict, entity_type): for key, value_object in parameters_dict.items(): + entity_value = entity.get(key, "N/A") + expected_value = value_object.get("value", "") + parameters.append( Parameters( category=entity_type, - id=entity["name"], + id=entity.get("name", "unknown"), name=key, - value=str(entity[key]), - expected_value=str(value_object.get("value", "")), + value=str(entity_value), + expected_value=str(expected_value), status=( TestStatus.SUCCESS.value - if entity[key] == value_object.get("value", "") + if entity_value == expected_value else TestStatus.ERROR.value ), ).to_dict() diff --git a/src/playbook_00_configuration_checks.yml b/src/playbook_00_configuration_checks.yml index 0d56147a..1c1f5df3 100644 --- 
a/src/playbook_00_configuration_checks.yml +++ b/src/playbook_00_configuration_checks.yml @@ -141,7 +141,7 @@ results_var: "db_db2_results" when: >- role == 'DB' and - (platform | default('HANA') | upper == 'Db2') and + (platform | default('HANA') | upper == 'DB2') and (configuration_test_type == 'all' or configuration_test_type == 'Database') diff --git a/src/roles/configuration_checks/tasks/files/db2.yml b/src/roles/configuration_checks/tasks/files/db2.yml index 75f8f832..7a608a34 100644 --- a/src/roles/configuration_checks/tasks/files/db2.yml +++ b/src/roles/configuration_checks/tasks/files/db2.yml @@ -73,6 +73,7 @@ enums: user: - root: &root "root" - sidadm: &sidadm "sidadm" + - db2adm: &db2sid "db2sid" - all_users: &user [*root, *sidadm] validator_type: @@ -107,7 +108,7 @@ checks: collector_args: command: "lscpu | grep -E 'Architecture:|Vendor ID:|Model name:' | grep 'x86_64' | awk '{print $2}'" user: *root - validator_type: *string + validator_type: *list validator_args: valid_list: ["x86_64", "x64"] report: *check @@ -138,7 +139,7 @@ checks: name: "SELinux settings" description: "SAP recommended SELinux in permissive mode or disabled" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -153,14 +154,14 @@ checks: user: *root validator_type: *string validator_args: - expected_output: "en_US.UTF-8" + expected_output: "enforcing" report: *check - id: "DB-Db2-0004" name: "vm.max_map_count setting" description: "vm.max_map_count setting" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -182,7 +183,7 @@ checks: name: "VM Swappiness setting" description: "vm.swappiness setting" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -204,7 +205,7 @@ checks: name: "VM Overcommit recovery setting" description: "vm.overcommit_memory setting" category: *sap_check - 
severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -226,7 +227,7 @@ checks: name: "Randomize VA Space setting" description: "Randomize VA Space" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -248,7 +249,7 @@ checks: name: "Max Async I/O" description: "Max Async I/O setting" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -272,7 +273,7 @@ checks: name: "kernel.sem" description: "kernel.sem" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -296,7 +297,7 @@ checks: name: "Kernel out of process" description: "Parameter that controls the system's behavior in response to a kernel out of process [OOPS]." category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -320,7 +321,7 @@ checks: name: "Max File Handles" description: "Parameter sets the maximum number of file handles (file descriptors) that the kernel will allocate." category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -344,7 +345,7 @@ checks: name: "Transparent Huge Pages" description: "Transparent Huge Pages (THP) is a Linux kernel feature related to memory management." 
category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -369,7 +370,7 @@ checks: name: "HADR TIMEOUT" description: "HADR TIMEOUT" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*redhat] @@ -380,8 +381,8 @@ checks: database_type: [*db2] collector_type: *command collector_args: - command: "$(getent passwd | cut -d ':' -f1 | grep db2) -c 'db2pd -alldbs -hadr' | grep -i 'HADR_TIMEOUT' | awk '{print $NF}'" - user: *root + command: "db2pd -alldbs -hadr | grep -i 'HADR_TIMEOUT' | awk '{print $NF}'" + user: *db2sid validator_type: *string validator_args: expected_output: "45" @@ -393,7 +394,7 @@ checks: name: "HADR TIMEOUT" description: "HADR TIMEOUT" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse] @@ -404,7 +405,7 @@ checks: database_type: [*db2] collector_type: *command collector_args: - command: "$(getent passwd | cut -d ':' -f1 | grep db2) -c 'db2pd -alldbs -hadr' | grep -i 'HADR_TIMEOUT' | awk '{print $NF}'" + command: "db2pd -alldbs -hadr | grep -i 'HADR_TIMEOUT' | awk '{print $NF}'" user: *root validator_type: *string validator_args: @@ -417,7 +418,7 @@ checks: name: "PEER WINDOW (seconds)" description: "PEER WINDOW (seconds) RedHat" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*redhat] @@ -428,8 +429,8 @@ checks: database_type: [*db2] collector_type: *command collector_args: - command: "$(getent passwd | cut -d ':' -f1 | grep db2) -c 'db2pd -alldbs -hadr' | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" - user: *root + command: "db2pd -alldbs -hadr | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" + user: *db2sid validator_type: *string validator_args: expected_output: "240" @@ -441,7 +442,7 @@ checks: name: "PEER WINDOW (seconds)" description: "PEER WINDOW (seconds) SUSE SBD" category: *sap_check - severity: *warning + severity: 
*high workload: *sap applicability: os_type: [*suse] @@ -453,8 +454,8 @@ checks: high_availability_agent: *sbd collector_type: *command collector_args: - command: "$(getent passwd | cut -d ':' -f1 | grep db2) -c 'db2pd -alldbs -hadr' | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" - user: *root + command: "db2pd -alldbs -hadr | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" + user: *db2sid validator_type: *string validator_args: expected_output: "300" @@ -466,7 +467,7 @@ checks: name: "PEER WINDOW (seconds)" description: "PEER WINDOW (seconds) SUSE Fencing Agent" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse] @@ -478,8 +479,8 @@ checks: high_availability_agent: *fencing_agent collector_type: *command collector_args: - command: "$(getent passwd | cut -d ':' -f1 | grep db2) -c 'db2pd -alldbs -hadr' | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" - user: *root + command: "db2pd -alldbs -hadr | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" + user: *db2sid validator_type: *string validator_args: expected_output: "900" @@ -491,7 +492,7 @@ checks: name: "Maximum shared memory segments" description: "Maximum shared memory segments" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: [*suse, *redhat] @@ -502,7 +503,7 @@ checks: database_type: [*db2] collector_type: *command collector_args: - command: "[ $((256 * $(free -g | grep Mem: | awk '{print $2}'))) -ge $(/sbin/sysctl kernel.shmmni -n) ] && echo OK || echo $(/sbin/sysctl kernel.shmmni -n)" + command: "[ $((256 * $(free -g | grep Mem: | awk '{print $2}'))) -ge $(/sbin/sysctl kernel.shmmni -n) ] && echo OK || echo $(/sbin/sysctl kernel.shmmni -n)" user: *root validator_type: *string validator_args: @@ -516,7 +517,7 @@ checks: name: "Instance Memory size" description: "Instance Memory size" category: *sap_check - severity: *warning + severity: *high workload: *sap applicability: os_type: 
[*suse, *redhat] @@ -527,8 +528,8 @@ checks: database_type: [*db2] collector_type: *command collector_args: - command: "output=$(su - $(getent passwd | grep db2 | cut -d : -f1) -c 'db2pd -dbmcfg' | grep INSTANCE_MEMORY | awk '{print $NF}'); [[ $output -le 100 ]] && echo $output || echo $(awk -v output=$output 'BEGIN {print output * 4096 / 1024 / 1024 / 1024}')" - user: *root + command: "output=$(db2pd -dbmcfg | grep INSTANCE_MEMORY | awk '{print $NF}'); [[ $output -le 100 ]] && echo $output || echo $(awk -v output=$output 'BEGIN {print output * 4096 / 1024 / 1024 / 1024}')" + user: *db2sid validator_type: *range validator_args: min: 64 diff --git a/src/templates/config_checks_report.html b/src/templates/config_checks_report.html index 27c7a248..6e491830 100644 --- a/src/templates/config_checks_report.html +++ b/src/templates/config_checks_report.html @@ -1250,8 +1250,8 @@

{{ check_type }}

{{ check.check.id }} {{ check.check.name }} - {{ check.actual_value|truncate(40) }} - {{ check.expected_value }} + {{ (check.actual_value|default('N/A', true)|string)|truncate(40) }} + {{ check.expected_value|default('N/A', true) }} {{ check.status }} diff --git a/tests/modules/get_azure_lb_test.py b/tests/modules/get_azure_lb_test.py index e85491b4..44c7c836 100644 --- a/tests/modules/get_azure_lb_test.py +++ b/tests/modules/get_azure_lb_test.py @@ -120,6 +120,182 @@ def test_get_load_balancers_details(self, azure_lb): assert azure_lb.result["status"] == "PASSED" assert azure_lb.result["details"]["parameters"] is not None + def test_load_balancer_missing_private_ip(self, mocker): + """ + Test handling of load balancers without private_ip_address in frontend config. + This simulates the scenario where a load balancer uses public IPs only. + + :param mocker: Mocking library for Python. + :type mocker: _mocker.MagicMock + """ + class LBWithoutPrivateIP: + def __init__(self): + self.name = "public-lb" + self.location = "test" + self.frontend_ip_configurations = [ + {"public_ip_address": "1.2.3.4"} + ] + self.load_balancing_rules = [] + self.probes = [] + + def as_dict(self): + return { + "name": self.name, + "location": self.location, + "frontend_ip_configurations": self.frontend_ip_configurations, + "load_balancing_rules": self.load_balancing_rules, + "probes": self.probes, + } + + patched_client = mocker.patch("src.modules.get_azure_lb.NetworkManagementClient") + patched_client.return_value.load_balancers.list_all.return_value = [ + LBWithoutPrivateIP(), + LoadBalancer("test", "127.0.0.1"), + + azure_lb = AzureLoadBalancer( + module_params={ + "subscription_id": "test", + "region": "test", + "inbound_rules": repr( + [ + { + "backendPort": "0", + "frontendPort": "0", + "protocol": "All", + "privateIpAddress": "127.0.0.1", + } + ] + ), + "constants": { + "AZURE_LOADBALANCER": { + "RULES": {}, + "PROBES": {}, + } + }, + } + ) + azure_lb.get_load_balancers_details() + 
assert azure_lb.result["status"] == "PASSED" + + def test_load_balancer_camelcase_ip_address(self, mocker): + """ + Test handling of load balancers with camelCase privateIpAddress key. + This simulates different Azure API response formats based on auth context. + + :param mocker: Mocking library for Python. + :type mocker: _mocker.MagicMock + """ + class LBWithCamelCase: + def __init__(self): + self.name = "camelcase-lb" + self.location = "test" + self.frontend_ip_configurations = [ + {"privateIpAddress": "192.168.1.1"} + ] + self.load_balancing_rules = [] + self.probes = [] + + def as_dict(self): + return { + "name": self.name, + "location": self.location, + "frontend_ip_configurations": self.frontend_ip_configurations, + "load_balancing_rules": self.load_balancing_rules, + "probes": self.probes, + } + + patched_client = mocker.patch("src.modules.get_azure_lb.NetworkManagementClient") + patched_client.return_value.load_balancers.list_all.return_value = [ + LBWithCamelCase(), + ] + + azure_lb = AzureLoadBalancer( + module_params={ + "subscription_id": "test", + "region": "test", + "inbound_rules": repr( + [ + { + "backendPort": "0", + "frontendPort": "0", + "protocol": "All", + "privateIpAddress": "192.168.1.1", + } + ] + ), + "constants": { + "AZURE_LOADBALANCER": { + "RULES": {}, + "PROBES": {}, + } + }, + } + ) + azure_lb.get_load_balancers_details() + assert azure_lb.result["status"] == "PASSED" + + def test_load_balancer_nested_properties(self, mocker): + """ + Test handling of load balancers with nested properties structure. + This simulates Azure API returning nested property objects. + + :param mocker: Mocking library for Python. 
+ :type mocker: _mocker.MagicMock + """ + class LBWithNestedProperties: + def __init__(self): + self.name = "nested-lb" + self.location = "test" + self.frontend_ip_configurations = [ + { + "properties": { + "private_ip_address": "10.0.0.5" + } + } + ] + self.load_balancing_rules = [] + self.probes = [] + + def as_dict(self): + return { + "name": self.name, + "location": self.location, + "frontend_ip_configurations": self.frontend_ip_configurations, + "load_balancing_rules": self.load_balancing_rules, + "probes": self.probes, + } + + patched_client = mocker.patch("src.modules.get_azure_lb.NetworkManagementClient") + patched_client.return_value.load_balancers.list_all.return_value = [ + LBWithNestedProperties(), + ] + + azure_lb = AzureLoadBalancer( + module_params={ + "subscription_id": "test", + "region": "test", + "inbound_rules": repr( + [ + { + "backendPort": "0", + "frontendPort": "0", + "protocol": "All", + "privateIpAddress": "10.0.0.5", + } + ] + ), + "constants": { + "AZURE_LOADBALANCER": { + "RULES": {}, + "PROBES": {}, + } + }, + } + ) + + azure_lb.get_load_balancers_details() + assert azure_lb.result["status"] == "PASSED" + def test_main(self, monkeypatch): """ Test the main function. From 53e2e1156460e9bc86618d9e2cdf07d0efcbfe9c Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 22:44:57 +0000 Subject: [PATCH 09/17] Refactor user parameter handling in CommandCollector and update DB2 user references in configuration checks; enhance network checks for clarity and accuracy. 
--- src/module_utils/collector.py | 2 +- .../configuration_checks/tasks/files/db2.yml | 103 ++++++++++++++++++ .../tasks/files/network.yml | 53 ++++++--- .../tasks/files/virtual_machine.yml | 5 +- 4 files changed, 145 insertions(+), 18 deletions(-) diff --git a/src/module_utils/collector.py b/src/module_utils/collector.py index 43b09507..3363498b 100644 --- a/src/module_utils/collector.py +++ b/src/module_utils/collector.py @@ -130,7 +130,7 @@ def collect(self, check, context) -> str: return f"ERROR: Invalid user parameter: {user}" if user == "db2sid": - user = f"db2{context.get('database_sid', '')}" + user = f"db2{context.get('database_sid', '').lower()}" command = f"sudo -u {shlex.quote(user)} {command}" diff --git a/src/roles/configuration_checks/tasks/files/db2.yml b/src/roles/configuration_checks/tasks/files/db2.yml index 7a608a34..b85a3798 100644 --- a/src/roles/configuration_checks/tasks/files/db2.yml +++ b/src/roles/configuration_checks/tasks/files/db2.yml @@ -536,3 +536,106 @@ checks: report: *check references: sap: "2751102" + + - id: "DB-Db2-0020" + name: "Filesystem Mount Points" + description: "Displays all filesystem mount points on the Db2 system" + category: *sap_check + severity: *info + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: [*db_role] + database_type: [*db2] + collector_type: *azure + collector_args: + resource_type: "filesystem" + report: *table + references: + microsoft: "https://learn.microsoft.com/azure/virtual-machines/workloads/sap/dbms-guide-ibm" + + - id: "DB-Db2-0021" + name: "Azure Disks" + description: "Displays Azure disk configuration for the Db2 system" + category: *sap_check + severity: *info + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: [*db_role] + database_type: [*db2] + collector_type: *azure + collector_args: + 
resource_type: "disk" + report: *table + references: + microsoft: "https://learn.microsoft.com/azure/virtual-machines/workloads/sap/dbms-guide-ibm" + + - id: "DB-Db2-0022" + name: "LVM Groups" + description: "Lists all LVM groups to ensure SAP HANA directories are configured with supported LVM configurations." + category: *sap_check + severity: *info + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: [*db_role] + database_type: [*hana] + collector_type: *azure + collector_args: + resource_type: "lvm_groups" + report: *table + references: + sap: "2972496" + microsoft: "https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/hana-vm-operations-storage" + + - id: "DB-Db2-0023" + name: "LVM Volumes" + description: "Lists all LVM volumes to ensure SAP HANA directories are configured with supported LVM configurations." + category: *sap_check + severity: *info + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: [*db_role] + database_type: [*hana] + collector_type: *azure + collector_args: + resource_type: "lvm_volumes" + report: *table + references: + sap: "2972496" + microsoft: "https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/hana-vm-operations-storage" + + - id: "DB-Db2-0024" + name: "ANF Volumes" + description: "Lists all ANF volumes to ensure SAP HANA directories are configured with supported ANF configurations." 
+ category: *sap_check + severity: *info + workload: *sap + applicability: + os_type: [*suse, *redhat] + os_version: *all_versions + hardware_type: *vm + storage_type: *all_storage + role: [*db_role] + database_type: [*hana] + collector_type: *azure + collector_args: + resource_type: "anf_volumes" + report: *table + references: + sap: "2972496" + microsoft: "https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/hana-vm-operations-storage" diff --git a/src/roles/configuration_checks/tasks/files/network.yml b/src/roles/configuration_checks/tasks/files/network.yml index bc741179..be80ad56 100644 --- a/src/roles/configuration_checks/tasks/files/network.yml +++ b/src/roles/configuration_checks/tasks/files/network.yml @@ -90,8 +90,8 @@ enums: checks: - id: "NET-0001" - name: "No of network interface" - description: "Checks the number of network interfaces on the VM" + name: "Virtual Network" + description: "Retrieves the virtual network(s) associated with the VM's network interface(s)" category: *network_check severity: *info workload: *workload @@ -103,12 +103,16 @@ checks: az vm nic list --resource-group {{ CONTEXT.resource_group_name }} \ --vm-name {{ CONTEXT.vm_name }} \ --subscription {{ CONTEXT.subscription_id }} \ - --query "[].{Name:id}" -o tsv | wc -l + --query "[].id" -o tsv | while read nic_id; do \ + nic=$(basename "$nic_id"); \ + az network nic show --resource-group {{ CONTEXT.resource_group_name }} --name "$nic" \ + --query "ipConfigurations[].subnet.id" -o tsv | awk -F'/' '{print $(NF-2)}'; \ + done report: *check - id: "NET-0002" - name: "Network Interface Name" - description: "Retrieves the name of the network interface(s) attached to the VM" + name: "Subnet" + description: "Retrieves the subnet(s) associated with the VM's network interface(s)" category: *network_check severity: *info workload: *workload @@ -120,12 +124,16 @@ checks: az vm nic list --resource-group {{ CONTEXT.resource_group_name }} \ --vm-name {{ CONTEXT.vm_name }} \ 
--subscription {{ CONTEXT.subscription_id }} \ - --query "[].id" -o tsv | xargs -I {} basename {} + --query "[].id" -o tsv | while read nic_id; do \ + nic=$(basename "$nic_id"); \ + az network nic show --resource-group {{ CONTEXT.resource_group_name }} --name "$nic" \ + --query "ipConfigurations[].subnet.id" -o tsv | xargs -I {} basename {}; \ + done report: *check - id: "NET-0003" - name: "Subnet" - description: "Retrieves the subnet(s) associated with the VM's network interface(s)" + name: "No of network interface" + description: "Checks the number of network interfaces on the VM" category: *network_check severity: *info workload: *workload @@ -137,14 +145,29 @@ checks: az vm nic list --resource-group {{ CONTEXT.resource_group_name }} \ --vm-name {{ CONTEXT.vm_name }} \ --subscription {{ CONTEXT.subscription_id }} \ - --query "[].id" -o tsv | while read nic_id; do \ - nic=$(basename "$nic_id"); \ - az network nic show --resource-group {{ CONTEXT.resource_group_name }} --name "$nic" \ - --query "ipConfigurations[].subnet.id" -o tsv | xargs -I {} basename {}; \ - done + --query "[].{Name:id}" -o tsv | wc -l report: *check - id: "NET-0004" + name: "Network Interface Name" + description: "Retrieves the name of the network interface(s) attached to the VM" + category: *network_check + severity: *info + workload: *workload + applicability: + hardware_type: *vm + collector_type: *azure + collector_args: + command: |- + az vm nic list --resource-group {{ CONTEXT.resource_group_name }} \ + --vm-name {{ CONTEXT.vm_name }} \ + --subscription {{ CONTEXT.subscription_id }} \ + --query "[].id" -o tsv | xargs -I {} basename {} + report: *check + + + + - id: "NET-0005" name: "Accelerated Networking" description: "Checks if Accelerated Networking is enabled on the VM's network interface(s)" category: *network_check @@ -169,7 +192,7 @@ checks: expected_output: "true" report: *check - - id: "NET-0005" + - id: "NET-0006" name: "No of IP configurations" description: "Checks the number 
of IP configurations on each network interface" category: *network_check @@ -191,7 +214,7 @@ checks: done report: *check - - id: "NET-0006" + - id: "NET-0007" name: "IP Details" description: "Retrieves all IP addresses configured on the VM's network interface(s)" category: *network_check diff --git a/src/roles/configuration_checks/tasks/files/virtual_machine.yml b/src/roles/configuration_checks/tasks/files/virtual_machine.yml index d381ab1a..c27e17b1 100644 --- a/src/roles/configuration_checks/tasks/files/virtual_machine.yml +++ b/src/roles/configuration_checks/tasks/files/virtual_machine.yml @@ -72,7 +72,8 @@ enums: user: - root: &root "root" - sidadm: &sidadm "sidadm" - - all_users: &user [*root, *sidadm] + - db2sid: &db2sid "db2sid" + - all_users: &user [*root, *sidadm, *db2sid] validator_type: - string: &string "string" @@ -564,7 +565,7 @@ checks: collector_type: *command collector_args: command: "db2pd -alldbs -hadr" - user: *sidadm + user: *db2sid report: *section - id: "IC-0029" From 2e871f831e26a4dce46e9a24592f065ca8337591 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 23:04:46 +0000 Subject: [PATCH 10/17] Add min_list validation for kernel parameters and update related tests --- src/modules/configuration_check_module.py | 39 ++++++++ .../configuration_checks/tasks/files/db2.yml | 13 +-- .../configuration_check_module_test.py | 88 +++++++++++++++++++ 3 files changed, 135 insertions(+), 5 deletions(-) diff --git a/src/modules/configuration_check_module.py b/src/modules/configuration_check_module.py index 03018b50..ad1cfd63 100644 --- a/src/modules/configuration_check_module.py +++ b/src/modules/configuration_check_module.py @@ -91,6 +91,7 @@ def _init_validator_registry(self) -> Dict[str, Any]: "string": self.validate_string, "range": self.validate_numeric_range, "list": self.validate_list, + "min_list": self.validate_min_list, "check_support": self.validate_vm_support, "properties": self.validate_properties, } @@ -497,6 +498,39 @@ def 
validate_list(self, check: Check, collected_data: str) -> Dict[str, Any]: ), } + def validate_min_list(self, check: Check, collected_data: str) -> Dict[str, Any]: + """ + Validate that each value in a space-separated list meets or exceeds minimum values. + Used for kernel parameters like kernel.sem where actual values must be >= minimum required. + + :param check: Check definition containing min_values and separator in validator_args + :type check: Check + :param collected_data: Space-separated string of values from system + :type collected_data: str + :return: Validation result dictionary + :rtype: Dict[str, Any] + """ + min_values = check.validator_args.get("min_values", []) + separator = check.validator_args.get("separator", " ") + + if not isinstance(min_values, list): + return { + "status": TestStatus.ERROR.value, + } + + collected_values = str(collected_data).strip().split(separator) if collected_data else [] + collected_values = [val.strip() for val in collected_values if val.strip()] + if len(collected_values) != len(min_values): + return { + "status": self._create_validation_result(check.severity, False), + } + all_valid = all( + int(actual) >= int(minimum) for actual, minimum in zip(collected_values, min_values) + ) + return { + "status": self._create_validation_result(check.severity, all_valid), + } + def validate_vm_support(self, check: Check, collected_data: str) -> Dict[str, Any]: """ Validates if a VM SKU is supported for the given role and database type @@ -609,6 +643,11 @@ def create_result( valid_list = check.validator_args.get("valid_list", []) if isinstance(valid_list, list) and valid_list: expected_value = ", ".join(str(v) for v in valid_list) + elif check.validator_type == "min_list": + min_values = check.validator_args.get("min_values", []) + separator = check.validator_args.get("separator", " ") + if isinstance(min_values, list) and min_values: + expected_value = f"Min: {separator.join(str(v) for v in min_values)}" elif check.validator_type 
== "properties": props = check.validator_args.get("properties", []) if isinstance(props, list) and props: diff --git a/src/roles/configuration_checks/tasks/files/db2.yml b/src/roles/configuration_checks/tasks/files/db2.yml index b85a3798..d058259c 100644 --- a/src/roles/configuration_checks/tasks/files/db2.yml +++ b/src/roles/configuration_checks/tasks/files/db2.yml @@ -194,12 +194,14 @@ checks: database_type: [*db2] collector_type: *command collector_args: - command: "/sbin/sysctl kernel.sem -n" + command: "/sbin/sysctl vm.swappiness -n" user: *root validator_type: *string validator_args: - expected_output: "32000 1024000000 500 32000" + expected_output: "5" report: *check + references: + sap: "2936683" - id: "DB-Db2-0006" name: "VM Overcommit recovery setting" @@ -271,7 +273,7 @@ checks: - id: "DB-Db2-0009" name: "kernel.sem" - description: "kernel.sem" + description: "kernel.sem - SAP minimum required values (actual values can be equal or higher)" category: *sap_check severity: *high workload: *sap @@ -286,9 +288,10 @@ checks: collector_args: command: "/sbin/sysctl kernel.sem -n" user: *root - validator_type: *string + validator_type: "min_list" validator_args: - expected_output: "32000 1024000000 500 32000" + min_values: ["32000", "1024000000", "500", "32000"] + separator: " " report: *check references: sap: "2936683" diff --git a/tests/modules/configuration_check_module_test.py b/tests/modules/configuration_check_module_test.py index a7808112..cf184741 100644 --- a/tests/modules/configuration_check_module_test.py +++ b/tests/modules/configuration_check_module_test.py @@ -290,6 +290,30 @@ def test_validate_list_no_match(self, config_module, sample_check): result = config_module.validate_list(sample_check, "item3, item4") assert result["status"] == TestStatus.WARNING.value + def test_validate_min_list_all_equal(self, config_module, sample_check): + """Test min_list validation with all values equal to minimum""" + sample_check.validator_args = { + "min_values": 
["32000", "1024000000", "500", "32000"], + "separator": " ", + } + result = config_module.validate_min_list(sample_check, "32000 1024000000 500 32000") + assert result["status"] == TestStatus.SUCCESS.value + sample_check.validator_args = { + "min_values": ["32000", "1024000000", "500", "32000"], + "separator": " ", + } + result = config_module.validate_min_list(sample_check, "32000 1024000000 500 32768") + assert result["status"] == TestStatus.SUCCESS.value + + def test_validate_min_list_tab_separator(self, config_module, sample_check): + """Test min_list validation with tab separator""" + sample_check.validator_args = { + "min_values": ["10", "20", "30"], + "separator": "\t", + } + result = config_module.validate_min_list(sample_check, "10\t20\t30") + assert result["status"] == TestStatus.SUCCESS.value + def test_validate_properties_success(self, config_module, sample_check): """Test properties validation with matching properties""" sample_check.validator_args = { @@ -424,6 +448,70 @@ def mock_collect_error(check, context): assert result.status == TestStatus.ERROR.value assert "Error" in result.details + def test_execute_check_min_list_validator_success(self, config_module): + """Test check execution with min_list validator - values meet minimum""" + config_module.set_context({"hostname": "testhost"}) + check = Check( + id="kernel_sem_check", + name="kernel.sem", + description="Kernel semaphore parameters", + category="OS", + workload="SAP", + severity=TestSeverity.HIGH, + collector_type="command", + collector_args={"command": "/sbin/sysctl kernel.sem -n"}, + validator_type="min_list", + validator_args={ + "min_values": ["32000", "1024000000", "500", "32000"], + "separator": " ", + }, + tags=["kernel"], + applicability=[], + references={}, + report="check", + ) + + def mock_collect(check_obj, context): + return "32000 1024000000 500 32768" + + with patch("src.module_utils.collector.CommandCollector.collect", side_effect=mock_collect): + result = 
config_module.execute_check(check) + assert result.status == TestStatus.SUCCESS.value + assert result.expected_value == "Min: 32000 1024000000 500 32000" + assert result.actual_value == "32000 1024000000 500 32768" + + def test_execute_check_min_list_validator_failure(self, config_module): + """Test check execution with min_list validator - values below minimum""" + config_module.set_context({"hostname": "testhost"}) + check = Check( + id="kernel_sem_check", + name="kernel.sem", + description="Kernel semaphore parameters", + category="OS", + workload="SAP", + severity=TestSeverity.HIGH, + collector_type="command", + collector_args={"command": "/sbin/sysctl kernel.sem -n"}, + validator_type="min_list", + validator_args={ + "min_values": ["32000", "1024000000", "500", "32000"], + "separator": " ", + }, + tags=["kernel"], + applicability=[], + references={}, + report="check", + ) + + def mock_collect(check_obj, context): + return "32000 1024000000 500 31999" + + with patch("src.module_utils.collector.CommandCollector.collect", side_effect=mock_collect): + result = config_module.execute_check(check) + assert result.status == TestStatus.ERROR.value + assert result.expected_value == "Min: 32000 1024000000 500 32000" + assert result.actual_value == "32000 1024000000 500 31999" + class TestExecuteCheckWithRetry: """Test suite for execute_check_with_retry method""" From 5aaa577713b90c23bd16869212eca110cdaa368c Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 23:12:45 +0000 Subject: [PATCH 11/17] Improve validation logic in ConfigurationCheckModule to handle integer conversion errors and sys.maxsize checks --- src/modules/configuration_check_module.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/modules/configuration_check_module.py b/src/modules/configuration_check_module.py index ad1cfd63..759c0cd0 100644 --- a/src/modules/configuration_check_module.py +++ b/src/modules/configuration_check_module.py @@ -9,6 +9,7 
@@ import time import json import re +import sys from typing import Optional, Dict, Any, List, Type from datetime import datetime from concurrent.futures import ThreadPoolExecutor @@ -524,9 +525,20 @@ def validate_min_list(self, check: Check, collected_data: str) -> Dict[str, Any] return { "status": self._create_validation_result(check.severity, False), } - all_valid = all( - int(actual) >= int(minimum) for actual, minimum in zip(collected_values, min_values) - ) + all_valid = True + for actual, minimum in zip(collected_values, min_values): + try: + actual_int = int(actual) + minimum_int = int(minimum) + if actual_int > sys.maxsize or minimum_int > sys.maxsize: + continue + if actual_int < minimum_int: + all_valid = False + break + except (ValueError, OverflowError): + all_valid = False + break + return { "status": self._create_validation_result(check.severity, all_valid), } From 6ab09fa89bf7a205b399578a22120dfd274c4bd9 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 23:19:49 +0000 Subject: [PATCH 12/17] Update DB2 user handling and command execution in db2.yml; include db2sid in user list and source db2profile for commands --- .../configuration_checks/tasks/files/db2.yml | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/roles/configuration_checks/tasks/files/db2.yml b/src/roles/configuration_checks/tasks/files/db2.yml index d058259c..a896ab97 100644 --- a/src/roles/configuration_checks/tasks/files/db2.yml +++ b/src/roles/configuration_checks/tasks/files/db2.yml @@ -73,8 +73,8 @@ enums: user: - root: &root "root" - sidadm: &sidadm "sidadm" - - db2adm: &db2sid "db2sid" - - all_users: &user [*root, *sidadm] + - db2sid: &db2sid "db2sid" + - all_users: &user [*root, *sidadm, *db2sid] validator_type: - string: &string "string" @@ -384,7 +384,7 @@ checks: database_type: [*db2] collector_type: *command collector_args: - command: "db2pd -alldbs -hadr | grep -i 'HADR_TIMEOUT' | awk '{print $NF}'" + command: ". 
~/sqllib/db2profile && db2pd -alldbs -hadr | grep -i 'HADR_TIMEOUT' | awk '{print $NF}'" user: *db2sid validator_type: *string validator_args: @@ -432,7 +432,7 @@ checks: database_type: [*db2] collector_type: *command collector_args: - command: "db2pd -alldbs -hadr | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" + command: ". ~/sqllib/db2profile && db2pd -alldbs -hadr | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" user: *db2sid validator_type: *string validator_args: @@ -457,7 +457,7 @@ checks: high_availability_agent: *sbd collector_type: *command collector_args: - command: "db2pd -alldbs -hadr | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" + command: ". ~/sqllib/db2profile && db2pd -alldbs -hadr | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" user: *db2sid validator_type: *string validator_args: @@ -482,7 +482,7 @@ checks: high_availability_agent: *fencing_agent collector_type: *command collector_args: - command: "db2pd -alldbs -hadr | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" + command: ". ~/sqllib/db2profile && db2pd -alldbs -hadr | grep -i 'PEER_WINDOW(seconds)' | awk '{print $NF}'" user: *db2sid validator_type: *string validator_args: @@ -531,7 +531,7 @@ checks: database_type: [*db2] collector_type: *command collector_args: - command: "output=$(db2pd -dbmcfg | grep INSTANCE_MEMORY | awk '{print $NF}'); [[ $output -le 100 ]] && echo $output || echo $(awk -v output=$output 'BEGIN {print output * 4096 / 1024 / 1024 / 1024}')" + command: ". 
~/sqllib/db2profile && output=$(db2pd -dbmcfg | grep INSTANCE_MEMORY | awk '{print $NF}'); [[ $output -le 100 ]] && echo $output || echo $(awk -v output=$output 'BEGIN {print output * 4096 / 1024 / 1024 / 1024}')" user: *db2sid validator_type: *range validator_args: @@ -592,7 +592,7 @@ checks: hardware_type: *vm storage_type: *all_storage role: [*db_role] - database_type: [*hana] + database_type: [*db2] collector_type: *azure collector_args: resource_type: "lvm_groups" @@ -613,7 +613,7 @@ checks: hardware_type: *vm storage_type: *all_storage role: [*db_role] - database_type: [*hana] + database_type: [*db2] collector_type: *azure collector_args: resource_type: "lvm_volumes" @@ -634,7 +634,7 @@ checks: hardware_type: *vm storage_type: *all_storage role: [*db_role] - database_type: [*hana] + database_type: [*db2] collector_type: *azure collector_args: resource_type: "anf_volumes" From 468f84f5a3cd83e1e060b7956c5bd70a004e1e00 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 23:29:36 +0000 Subject: [PATCH 13/17] Enhance min_list validation in ConfigurationCheckModule to handle exceptions and improve error logging --- src/modules/configuration_check_module.py | 53 +++++++++++++---------- 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/src/modules/configuration_check_module.py b/src/modules/configuration_check_module.py index 759c0cd0..6a4bcab6 100644 --- a/src/modules/configuration_check_module.py +++ b/src/modules/configuration_check_module.py @@ -513,35 +513,40 @@ def validate_min_list(self, check: Check, collected_data: str) -> Dict[str, Any] """ min_values = check.validator_args.get("min_values", []) separator = check.validator_args.get("separator", " ") + try: - if not isinstance(min_values, list): - return { - "status": TestStatus.ERROR.value, - } + if not isinstance(min_values, list): + return { + "status": TestStatus.ERROR.value, + } - collected_values = str(collected_data).strip().split(separator) if collected_data else [] - 
collected_values = [val.strip() for val in collected_values if val.strip()] - if len(collected_values) != len(min_values): - return { - "status": self._create_validation_result(check.severity, False), - } - all_valid = True - for actual, minimum in zip(collected_values, min_values): - try: - actual_int = int(actual) - minimum_int = int(minimum) - if actual_int > sys.maxsize or minimum_int > sys.maxsize: - continue - if actual_int < minimum_int: + collected_values = ( + str(collected_data).strip().split(separator) if collected_data else [] + ) + collected_values = [val.strip() for val in collected_values if val.strip()] + if len(collected_values) != len(min_values): + return { + "status": self._create_validation_result(check.severity, False), + } + all_valid = True + for actual, minimum in zip(collected_values, min_values): + try: + actual_int = int(actual) + minimum_int = int(minimum) + if actual_int > sys.maxsize or minimum_int > sys.maxsize: + continue + if actual_int < minimum_int: + all_valid = False + break + except (ValueError, OverflowError): all_valid = False break - except (ValueError, OverflowError): - all_valid = False - break - return { - "status": self._create_validation_result(check.severity, all_valid), - } + return { + "status": self._create_validation_result(check.severity, all_valid), + } + except Exception as ex: + self.log(logging.ERROR, f"Error while validating min list {ex}") def validate_vm_support(self, check: Check, collected_data: str) -> Dict[str, Any]: """ From 9ba6b87dfc5ddfd5a06ec7c49eec721f99e08817 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 22 Oct 2025 23:57:55 +0000 Subject: [PATCH 14/17] Update documentation and improve configuration check reporting; enhance error handling in validation logic --- README.md | 29 +++++++++------------ docs/CONFIGURATION_CHECKS.md | 17 ++++++++---- docs/SETUP.MD | 2 +- src/modules/configuration_check_module.py | 3 +++ src/playbook_00_configuration_checks.yml | 5 ++++ 
src/roles/misc/tasks/render-html-report.yml | 2 +- 6 files changed, 34 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index fa22c7f0..f61181c3 100644 --- a/README.md +++ b/README.md @@ -11,48 +11,42 @@ ## 🔍 Overview -The SAP Testing Automation Framework is an open-source orchestration tool designed to validate SAP deployments on Microsoft Azure. It enables you to assess system configurations against SAP on Azure best practices and guidelines. Additionally, the framework facilitates automation for various testing scenarios, including High Availability (HA) functional testing. +The SAP Testing Automation Framework is an open-source orchestration tool designed to validate SAP deployments on Microsoft Azure. It enables you to assess system configurations against SAP on Azure best practices and guidelines, and facilitates automation for various testing scenarios. > **NOTE**: This repository is currently in public preview and is intended for testing and feedback purposes. As this is an early release, it is not yet production-ready, and breaking changes can be introduced at any time. ![SAP Testing Automation Framework](./docs/images/sap-testing-automation-framework.png) -## Supported Configuration Matrix - -The following SAP components are supported in a two-node Pacemaker cluster running on SUSE Linux Enterprise Server (SLES) or Red Hat Enterprise Linux (RHEL): - -- **SAP HANA Scale-Up** -- **SAP Central Services** - -For additional information on supported configuration patterns, such as cluster types (Azure Fence Agent or SBD) and storage options (Azure Files or Azure NetApp Files) in this automated testing framework, refer to [supported high availability configuration](./docs/HIGH_AVAILABILITY.md). - ## 📊 Key Features - **High Availability Testing** - Thorough validation of the SAP HANA scale-up and SAP Central Services failover mechanism in a two node pacemaker cluster, ensuring the system operates correctly across various test cases. 
- **Configuration Validation** - Ensures that SAP HANA scale-up and SAP Central Services configurations comply with SAP on Azure best practices and guidelines. - **Functional Testing** - Executes test scenarios on the high availability setup to identify potential issues, whether during a new system deployment or before implementing cluster changes in a production environment. +- **Configuration Checks** - Validates OS parameters, database settings, Azure resources, and storage configurations against SAP and Azure best practices for supported databases. Performs comprehensive validation including kernel parameters, filesystem mounts, VM sizing, and network setup to ensure compliance with recommended guidelines. - **Detailed Reporting** - Generates comprehensive reports, highlighting configuration mismatch or deviations from recommended best practices. Includes failover test outcomes, any failures encountered, and logs with insights to aid in troubleshooting identified issues. ## 🏆 Purpose Testing is crucial for keeping SAP systems running smoothly, especially for critical business operations. This framework helps by addressing key challenges: -- **Preventing Risks** - It simulates system failures like node crashes, network issues, and storage failures to check if recovery mechanisms work properly, helping to catch problems before they affect real operations. -- **Meeting Compliance Requirements** - Many businesses need to prove their SAP systems are reliable. This framework provides detailed reports and logs that help with audits and ensure compliance with internal and regulatory standards. -- **Ensuring Quality** - The framework runs automated tests to verify whether the failover behavior of SAP components functions as expected on Azure across various test scenarios. It also ensures that the cluster and resource configurations are set up correctly, helping to maintain system reliability. 
-- **Automating Testing**: Manually testing high availability (HA) setups is slow and error-prone. This framework automates the process—from setup to reporting—saving time and ensuring more accurate and consistent results. +- **Preventing Risks** - Identifies configuration issues and validates system behavior before problems affect production operations. It simulates system failures like node crashes, network issues, and storage failures to check if recovery mechanisms work properly, helping to catch potential issues early. +- **Meeting Compliance Requirements** - Provides detailed reports and logs that help with audits and ensure compliance with internal and regulatory standards. +- **Ensuring Quality** - The framework runs automated tests to verify whether the failover behavior of SAP components functions as expected on Azure across various test scenarios. It also ensures that the cluster and resource configurations are set up correctly, helping to maintain system reliability. +- **Automating Testing** - Automates validation processes from configuration checks to reporting, saving time and ensuring consistent results. ## 🚦 Get Started There are two primary ways to get started with the SAP Testing Automated Framework. Choose the path that best fits your current environment and objectives: -### Option 1: [Integration with SAP Deployment Automation Framework (SDAF)](./docs/SDAF_INTEGRATION.md) +### Option 1: Integration with SAP Deployment Automation Framework (SDAF) If you already have [SDAF](https://github.com/Azure/sap-automation) environment set up, integrating the SAP Testing Automation Framework is a natural extension that allows you to leverage existing deployment pipelines and configurations. 
-### Option 2: [Getting Started with High Availability Testing (Standalone)](./docs/HIGH_AVAILABILITY.md) +### Option 2: Getting Started with High Availability Testing (Standalone) For users focused solely on validating SAP functionality and configurations, the standalone approach offers a streamlined process to test critical SAP components without the complexity of full deployment integration. + - For High Availability testing details, see the [High Availability documentation](./docs/HIGH_AVAILABILITY.md). + - For Configuration Checks and Testing details, see the [Configuration Checks documentation](./docs/CONFIGURATION_CHECKS.md). ## 🏗️ Architecture and Components @@ -68,7 +62,8 @@ For support and questions, please: ## 📚 Additional Resources - [Azure SAP Documentation](https://docs.microsoft.com/azure/sap) -- [SAP on Azure: High Availability Guide](https://docs.microsoft.com/azure/sap/workloads/sap-high-availability-guide-start) +- [Configuration Checks Guide](./docs/CONFIGURATION_CHECKS.md) +- [High Availability Testing Guide](./docs/HIGH_AVAILABILITY.md) ## 🤝 Contributing diff --git a/docs/CONFIGURATION_CHECKS.md b/docs/CONFIGURATION_CHECKS.md index a312a48a..5a528b8a 100644 --- a/docs/CONFIGURATION_CHECKS.md +++ b/docs/CONFIGURATION_CHECKS.md @@ -58,8 +58,16 @@ Follow the steps (2.1 - 2.2) in [Setup Guide for SAP Testing Automation Framewor > **Note**: High Availability (HA) configuration checks and functional tests are currently supported only for SAP HANA databases. For IBM DB2 databases, only non-HA configuration checks are available. +### 3. Required Access and Permissions -### 3. Test Execution +Ensure that the managed identity or service principal used by the controller virtual machine has the necessary permissions to access Azure resources and SAP systems for configuration validation. +1. "Reader" role to the user-assigned managed identity on the resource group containing the SAP VMs and the Azure Load Balancer. +1. 
"Reader" role to the user-assigned managed identity on the resource group containing the Azure NetApp Files account (if using Azure NetApp Files as shared storage). +1. "Reader" role to the user-assigned managed identity on the resource group containing the storage account (if using Azure File Share as shared storage). +1. "Reader" role to the user-assigned managed identity on the resource group containing the managed disks (if using Azure Managed Disks for SAP HANA data and log volumes). +1. "Reader" role to the user-assigned managed identity on the resource group containing the shared disks (if using Azure Shared Disks for SBD devices). + +### 4. Test Execution To execute the script, run following command: @@ -83,7 +91,7 @@ To execute the script, run following command: ./scripts/sap_automation_qa.sh --extra-vars='{"configuration_test_type":"ApplicationInstances"}' ``` -### 4. Viewing Test Results +### 5. Viewing Test Results After the test execution completes, a detailed HTML report is generated that summarizes the PASS/FAIL status of each test case and includes detailed execution logs for every step of the automation run. @@ -101,12 +109,11 @@ After the test execution completes, a detailed HTML report is generated that sum The report file is named using the following format: ``` - HA_{SAP_TIER}_{DATABASE_TYPE}_{OS_DISTRO_NAME}_{INVOCATION_ID}.html + CONFIG_{SAP_SID}_{DATABASE_TYPE}_{INVOCATION_ID}.html ``` - - `SAP_TIER`: The SAP tier tested (e.g., DB, SCS) + - `SAP_SID`: The SAP system ID (e.g., HN1, NWP) - `DATABASE_TYPE`: The database type (e.g., HANA) - - `OS_DISTRO_NAME`: The operating system distribution (e.g., SLES15SP4) - `INVOCATION_ID`: A unique identifier (Group invocation ID) for the test run which is logged at the end of test execution. 
Find example screenshot below: ![Test Execution Completion Screenshot](./images/execution_screenshot.png) diff --git a/docs/SETUP.MD b/docs/SETUP.MD index 91e2c935..ac57a4d7 100644 --- a/docs/SETUP.MD +++ b/docs/SETUP.MD @@ -44,7 +44,7 @@ For the framework to access the properties of the Azure Load Balancer in a high **Permissions required for Configuration Checks:** 1. "Reader" role to the user-assigned managed identity on the resource group containing the SAP VMs and the Azure Load Balancer. 1. "Reader" role to the user-assigned managed identity on the resource group containing the Azure NetApp Files account (if using Azure NetApp Files as shared storage). -1. "Storage Account Reader" role to the user-assigned managed identity on the resource group containing the storage account (if using Azure File Share as shared storage). +1. "Reader" role to the user-assigned managed identity on the resource group containing the storage account (if using Azure File Share as shared storage). 1. "Reader" role to the user-assigned managed identity on the resource group containing the managed disks (if using Azure Managed Disks for SAP HANA data and log volumes). 1. "Reader" role to the user-assigned managed identity on the resource group containing the shared disks (if using Azure Shared Disks for SBD devices). 
diff --git a/src/modules/configuration_check_module.py b/src/modules/configuration_check_module.py index 6a4bcab6..a153508f 100644 --- a/src/modules/configuration_check_module.py +++ b/src/modules/configuration_check_module.py @@ -547,6 +547,9 @@ def validate_min_list(self, check: Check, collected_data: str) -> Dict[str, Any] } except Exception as ex: self.log(logging.ERROR, f"Error while validating min list {ex}") + return { + "status": TestStatus.ERROR.value, + } def validate_vm_support(self, check: Check, collected_data: str) -> Dict[str, Any]: """ diff --git a/src/playbook_00_configuration_checks.yml b/src/playbook_00_configuration_checks.yml index 1c1f5df3..d1f7603f 100644 --- a/src/playbook_00_configuration_checks.yml +++ b/src/playbook_00_configuration_checks.yml @@ -438,3 +438,8 @@ ansible.builtin.include_tasks: "./roles/misc/tasks/render-html-report.yml" vars: html_template_name: "./templates/config_checks_report.html" + report_file_name: "CONFIG_{{ sap_sid | upper }}_{{ platform | upper }}_{{ test_group_invocation_id }}" + + - name: "Debug the file name of the report generated" + ansible.builtin.debug: + msg: "Report file CONFIG_{{ sap_sid | upper }}_{{ platform | upper }}_{{ test_group_invocation_id }} generated." 
diff --git a/src/roles/misc/tasks/render-html-report.yml b/src/roles/misc/tasks/render-html-report.yml index 8e41f712..77e6be78 100644 --- a/src/roles/misc/tasks/render-html-report.yml +++ b/src/roles/misc/tasks/render-html-report.yml @@ -16,7 +16,7 @@ - name: "Read the log file and create a HTML report" render_html_report: test_group_invocation_id: "{{ test_group_invocation_id }}" - test_group_name: "{{ test_group_name }}_{{ ansible_os_family | upper }}" + test_group_name: "{{ report_file_name | default(test_group_name + '_' + ansible_os_family | upper) }}" report_template: "{{ html_report_template }}" workspace_directory: "{{ _workspace_directory }}" test_case_results: "{{ all_results | default([]) }}" From 495116e46e17a7ddf594d1b3fbd36c99ecadc6ea Mon Sep 17 00:00:00 2001 From: devanshjain Date: Thu, 23 Oct 2025 00:06:45 +0000 Subject: [PATCH 15/17] Refactor test cases in get_azure_lb_test.py to streamline frontend_ip_configurations initialization --- tests/modules/get_azure_lb_test.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/tests/modules/get_azure_lb_test.py b/tests/modules/get_azure_lb_test.py index 44c7c836..457bb7ca 100644 --- a/tests/modules/get_azure_lb_test.py +++ b/tests/modules/get_azure_lb_test.py @@ -128,13 +128,12 @@ def test_load_balancer_missing_private_ip(self, mocker): :param mocker: Mocking library for Python. 
:type mocker: _mocker.MagicMock """ + class LBWithoutPrivateIP: def __init__(self): self.name = "public-lb" self.location = "test" - self.frontend_ip_configurations = [ - {"public_ip_address": "1.2.3.4"} - ] + self.frontend_ip_configurations = [{"public_ip_address": "1.2.3.4"}] self.load_balancing_rules = [] self.probes = [] @@ -151,6 +150,7 @@ def as_dict(self): patched_client.return_value.load_balancers.list_all.return_value = [ LBWithoutPrivateIP(), LoadBalancer("test", "127.0.0.1"), + ] azure_lb = AzureLoadBalancer( module_params={ @@ -185,13 +185,12 @@ def test_load_balancer_camelcase_ip_address(self, mocker): :param mocker: Mocking library for Python. :type mocker: _mocker.MagicMock """ + class LBWithCamelCase: def __init__(self): self.name = "camelcase-lb" self.location = "test" - self.frontend_ip_configurations = [ - {"privateIpAddress": "192.168.1.1"} - ] + self.frontend_ip_configurations = [{"privateIpAddress": "192.168.1.1"}] self.load_balancing_rules = [] self.probes = [] @@ -242,16 +241,13 @@ def test_load_balancer_nested_properties(self, mocker): :param mocker: Mocking library for Python. 
:type mocker: _mocker.MagicMock """ + class LBWithNestedProperties: def __init__(self): self.name = "nested-lb" self.location = "test" self.frontend_ip_configurations = [ - { - "properties": { - "private_ip_address": "10.0.0.5" - } - } + {"properties": {"private_ip_address": "10.0.0.5"}} ] self.load_balancing_rules = [] self.probes = [] From 9c1e470d65f67bcf82c3d0c965176d6de00700e6 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Thu, 23 Oct 2025 00:22:42 +0000 Subject: [PATCH 16/17] Refactor AzureLoadBalancer to simplify private IP address retrieval; update test case for frontend_ip_configurations --- src/modules/get_azure_lb.py | 2 -- tests/modules/get_azure_lb_test.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/modules/get_azure_lb.py b/src/modules/get_azure_lb.py index 966f241b..16f0faf7 100644 --- a/src/modules/get_azure_lb.py +++ b/src/modules/get_azure_lb.py @@ -246,8 +246,6 @@ def get_private_ip_from_config(config): private_ip = ( config.get("private_ip_address") or config.get("privateIpAddress") - or config.get("properties", {}).get("private_ip_address") - or config.get("properties", {}).get("privateIpAddress") ) return private_ip diff --git a/tests/modules/get_azure_lb_test.py b/tests/modules/get_azure_lb_test.py index 457bb7ca..bc9e7fd8 100644 --- a/tests/modules/get_azure_lb_test.py +++ b/tests/modules/get_azure_lb_test.py @@ -247,7 +247,7 @@ def __init__(self): self.name = "nested-lb" self.location = "test" self.frontend_ip_configurations = [ - {"properties": {"private_ip_address": "10.0.0.5"}} + {"private_ip_address": "10.0.0.5"} ] self.load_balancing_rules = [] self.probes = [] From e1f159af41ec1f0aef9c58bbf80528bd95b39d0a Mon Sep 17 00:00:00 2001 From: devanshjain Date: Thu, 23 Oct 2025 04:34:38 +0000 Subject: [PATCH 17/17] Add Azure login instructions to CONFIGURATION_CHECKS.md; update section numbering for clarity --- docs/CONFIGURATION_CHECKS.md | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) 
diff --git a/docs/CONFIGURATION_CHECKS.md b/docs/CONFIGURATION_CHECKS.md index 5a528b8a..1a65eab3 100644 --- a/docs/CONFIGURATION_CHECKS.md +++ b/docs/CONFIGURATION_CHECKS.md @@ -67,7 +67,22 @@ Ensure that the managed identity or service principal used by the controller vir 1. "Reader" role to the user-assigned managed identity on the resource group containing the managed disks (if using Azure Managed Disks for SAP HANA data and log volumes). 1. "Reader" role to the user-assigned managed identity on the resource group containing the shared disks (if using Azure Shared Disks for SBD devices). -### 4. Test Execution +### 4. Azure Login (required) + +Ensure that you are logged into Azure CLI on the controller VM with the appropriate subscription context: + +```bash +# Login to Azure using System Assigned Managed Identity +az login --identity + +# Login to Azure using User Assigned Managed Identity +az login --identity -u <client-id> + +# Set the desired subscription context +az account set --subscription <subscription-id> +``` + +### 5. Test Execution To execute the script, run following command: @@ -91,7 +106,7 @@ To execute the script, run following command: ./scripts/sap_automation_qa.sh --extra-vars='{"configuration_test_type":"ApplicationInstances"}' ``` -### 5. Viewing Test Results +### 6. Viewing Test Results After the test execution completes, a detailed HTML report is generated that summarizes the PASS/FAIL status of each test case and includes detailed execution logs for every step of the automation run.